path (stringlengths 9-117) | type (stringclasses 2 values) | project (stringclasses 10 values) | commit_hash (stringlengths 40-40) | commit_message (stringlengths 1-137) | ground_truth (stringlengths 0-2.74k) | main_code (stringlengths 102-3.37k) | context (stringlengths 0-14.7k) |
---|---|---|---|---|---|---|---|
tests.test_optim/test_optimizer8bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
2eb310835668f854c169953814f1d3b16a44346b
|
Fixed bug where beta2 was not passed into Lion 32-bit.
|
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
<0> if dim1 == 1 and dim2 == 1:
<1> return
<2> p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
<3> p2 = p1.clone()
<4> p1 = p1.float()
<5> blocksize = 2048
<6>
<7> torch_optimizer = str2optimizers[optim_name][0]([p1])
<8> bnb_optimizer = str2optimizers[optim_name][1]([p2])
<9>
<10> if gtype == torch.float32:
<11> atol, rtol = 3e-3, 1e-3
<12> patol, prtol = 1e-5, 1e-3
<13>
<14> else:
<15> atol, rtol = 3e-3, 1e-3
<16> patol, prtol = 1e-5, 1e-3
<17>
<18> errors = []
<19> relerrors = []
<20>
<21> for i in range(50):
<22> g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
<23> p1.grad = g.clone().float()
<24> p2.grad = g.clone()
<25>
<26> bnb_optimizer.step()
<27> torch_optimizer.step()
<28>
<29> # since Lion can have pretty noisy updates where things lie at the boundary
<30> # allow up to 5 errors for Lion
<31> assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=5)
<32>
<33> dequant_states = []
<34> for name1, name2, qmap, max_val in str2statenames[optim_name]:
<35> # print(bnb_optimizer.state[p2][max_val], name1)
<36> if "</s>
|
===========below chunk 0===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
# offset: 1
s1 = F.dequantize_blockwise(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
blocksize=blocksize,
)
else:
s1 = F.dequantize(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
)
num_not_close = (
torch.isclose(
torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol
)
== 0
)
assert num_not_close.sum().item() < 20
dequant_states.append(s1.clone())
err = torch.abs(p1 - p2)
relerr = err / torch.abs(p1)
assert err.mean() < 0.0001
assert relerr.mean() < 0.001
errors.append(err.mean().item())
relerrors.append(relerr.mean().item())
if i % 10 == 0 and i > 0:
for (name1, name2, qmap, max_val), s in zip(
str2statenames[optim_name], dequant_states
):
s1cpy = s.clone()
raws1cpy = bnb_optimizer.state[p2][name2].clone()
qmap1 = bnb_optimizer.state[p2][qmap].clone()
path = get_temp_dir()
torch.save(bnb_optimizer.state_dict(), join(path</s>
===========below chunk 1===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
# offset: 2
<s>()
path = get_temp_dir()
torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
del bnb_optimizer
bnb_optimizer = None
bnb_optimizer = str2optimizers[optim_name][1]([p2])
bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
rm_path(path)
torch.testing.assert_allclose(
raws1cpy, bnb_optimizer.state[p2][name2]
)
torch.testing.assert_allclose(
qmap1, bnb_optimizer.state[p2][qmap]
)
if "blockwise" in optim_name:
s1 = F.dequantize_blockwise(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
blocksize=blocksize,
)
else:
s1 = F.dequantize(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
)
torch.testing.assert_allclose(s1cpy, s1)
num_not_close = (
torch.isclose(
torch_optimizer.state[p1][name1],
s1,
atol=atol,
rtol=rtol,
</s>
===========below chunk 2===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
# offset: 3
<s>
== 0
)
assert num_not_close.sum().item() < 20
# since Lion can have pretty noisy updates where things lie at the boundary
# allow up to 5 errors for Lion
assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=5)
# the parameters diverge quickly. Here we keep them close
# together so we can test against the Adam error
p1.data = p1.data.to(gtype).float()
p2.copy_(p1.data)
torch.testing.assert_allclose(p1.to(gtype), p2)
for (name1, name2, qmap, max_val), s in zip(
str2statenames[optim_name], dequant_states
):
torch_optimizer.state[p1][name1].copy_(s.data)
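The comparison in this test works by dequantizing the optimizer's 8-bit blockwise state (F.dequantize_blockwise with the stored code and absmax) and checking it against the reference 32-bit state. As a rough illustration of what blockwise absmax quantization does, here is a conceptual plain-PyTorch sketch (the helper names and the simple linear int8 code are invented for this example; the real 8-bit optimizers use a dynamic quantization map instead):

import torch

def blockwise_quantize_demo(x: torch.Tensor, blocksize: int = 2048):
    # Split the flattened tensor into blocks; each block is scaled by its own
    # absolute maximum so one outlier cannot destroy precision everywhere else.
    flat = x.flatten()
    pad = (-flat.numel()) % blocksize
    flat = torch.nn.functional.pad(flat, (0, pad))
    blocks = flat.view(-1, blocksize)
    absmax = blocks.abs().max(dim=1, keepdim=True).values.clamp(min=1e-8)
    q = torch.round(blocks / absmax * 127).to(torch.int8)
    return q, absmax

def blockwise_dequantize_demo(q: torch.Tensor, absmax: torch.Tensor):
    return q.float() / 127 * absmax

x = torch.randn(4096) * 0.1
q, absmax = blockwise_quantize_demo(x)
x_hat = blockwise_dequantize_demo(q, absmax).flatten()[: x.numel()]
print((x - x_hat).abs().max())  # small per-block reconstruction error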
===========changed ref 0===========
# module: tests.test_optim
def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):
idx = torch.isclose(a, b, rtol, atol)
error_count = (idx == 0).sum().item()
if error_count > max_error_count:
+ print(f"Too many values not close: assert {error_count} < {max_error_count}")
- print(f"Too many values not close: assert {sumval} < {count}")
torch.testing.assert_allclose(a, b, rtol, atol)
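For intuition, assert_most_approx_close tolerates up to max_error_count elementwise mismatches and only then falls back to the strict assert_allclose. A tiny illustration with made-up tensors:

a = torch.zeros(100)
b = torch.zeros(100)
b[:3] = 1.0  # three elements differ far beyond atol/rtol
assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=5)  # passes
# with max_error_count=2 the same call would print the message above and then
# fail inside torch.testing.assert_allclose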
===========changed ref 1===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
if dim1 == 1 and dim2 == 1:
return
p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
p2 = p1.clone()
p1 = p1.float()
torch_optimizer = str2optimizers[optim_name][0]([p1])
bnb_optimizer = str2optimizers[optim_name][1]([p2])
if gtype == torch.float32:
atol, rtol = 1e-6, 1e-5
else:
atol, rtol = 1e-4, 1e-3
for i in range(k):
g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
p1.grad = g.clone().float()
p2.grad = g.clone()
bnb_optimizer.step()
torch_optimizer.step()
+
for name1, name2 in str2statenames[optim_name]:
torch.testing.assert_allclose(
torch_optimizer.state[p1][name1],
bnb_optimizer.state[p2][name2],
atol=atol,
rtol=rtol,
)
+ # since Lion can have pretty noisy updates where things lie at the boundary
+ # allow up to 10 errors for Lion
+ assert_most_approx_close(p1, p2.float(), atol, rtol, max_error_count=10)
- torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol)
if i % (k // 5) == 0 and i > 0:
path = get_temp_dir()
torch.save(b</s>
|
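The commit message for this record ("Fixed bug where beta2 was not passed into Lion 32-bit") is easier to follow with the Lion update rule in mind. A rough sketch of Lion as published (illustrative Python, not the bitsandbytes CUDA kernel): beta2 only enters the momentum EMA, so dropping it silently changes how fast the momentum decays while the sign-based parameter update still looks plausible.

def lion_step_demo(p, grad, m, lr=1e-4, beta1=0.9, beta2=0.99, weight_decay=0.0):
    # interpolate momentum and gradient, take the sign as the update direction
    update = (beta1 * m + (1 - beta1) * grad).sign()
    p = p - lr * (update + weight_decay * p)
    # beta2 controls the momentum decay; this is the term the fix forwards
    # into the 32-bit Lion kernel
    m = beta2 * m + (1 - beta2) * grad
    return p, m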
|
bitsandbytes.functional/optimizer_update_32bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
2bb5c00ba9b0af840e9226a6100f2e968c0763f4
|
Added pre/post call to all lib calls. Fixes #120
|
<s>
g: Tensor,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
<0> """
<1> Performs an inplace optimizer update with one or two optimizer states.
<2>
<3> Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
<4>
<5> Parameters
<6> ----------
<7> optimizer_name : str
<8> The name of the optimizer: {adam}.
<9> g : torch.Tensor
<10> Gradient tensor.
<11> p : torch.Tensor
<12> Parameter tensor.
<13> state1 : torch.Tensor
<14> Optimizer state 1.
<15> beta1 : float
<16> Optimizer beta1.
<17> eps : float
<18> Optimizer epsilon.
<19> weight_decay : float
<20> Weight decay.
<21> step : int
<22> Current optimizer step.
<23> lr : float
<24> The learning rate.
<25> state2 : torch.Tensor
<26> Optimizer state 2.
<27> beta2 : float
<28> Optimizer beta2.
<29> gnorm_scale : float
<30> The factor to rescale the gradient to the max clip value.
<31> unorm_vec : torch.Tensor
<32> The tensor for the update norm.
<33> max_unorm : float
<34> The maximum update norm relative to the weight norm.
<35> skip_zeros : bool
<36> Whether to skip zero-valued gradients or not (default: False).
<37> """
<38>
<39> param_norm = 0.0
<40> if max_unorm > 0.0:
<41> param_norm = torch.norm(p.data.float())
<42>
<43> if optimizer_name not in str2optimizer32bit:
<44> raise NotImplementedError(
<45> f</s>
|
===========below chunk 0===========
<s>,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
# offset: 1
)
if g.dtype == torch.float32 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][0](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][1](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c</s>
===========below chunk 1===========
<s>,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
# offset: 2
<s>eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
|
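Throughout this commit ("Added pre/post call to all lib calls. Fixes #120") every native library call gets wrapped in prev_device = pre_call(tensor.device) ... post_call(prev_device), plus an is_on_gpu([...]) check that presumably validates the tensors whose raw pointers are handed to the C library. The helpers' source is not part of this excerpt; conceptually they act as a CUDA device guard, roughly like the following sketch (an assumption about their behavior, not their actual implementation):

import torch

def pre_call_sketch(device):
    # remember the currently active CUDA device, then switch to the device
    # that owns the tensors so the kernel launches on the right GPU
    prev_device = torch.cuda.current_device()
    if device.type == "cuda" and device.index is not None:
        torch.cuda.set_device(device)
    return prev_device

def post_call_sketch(prev_device):
    # restore whatever device was active before the library call
    torch.cuda.set_device(prev_device)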
|
bitsandbytes.functional/optimizer_update_8bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
2bb5c00ba9b0af840e9226a6100f2e968c0763f4
|
Added pre/post call to all lib calls. Fixes #120
|
<s> beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
max1: Tensor,
max2: Tensor,
new_max1: Tensor,
new_max2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
) -> None:
<0> """
<1> Performs an inplace Adam update.
<2>
<3> Universal Adam update for 32/8-bit state and 32/16-bit gradients/weights.
<4> Uses AdamW formulation if weight decay > 0.0.
<5>
<6> Parameters
<7> ----------
<8> optimizer_name : str
<9> The name of the optimizer. Choices {adam, momentum}
<10> g : torch.Tensor
<11> Gradient tensor.
<12> p : torch.Tensor
<13> Parameter tensor.
<14> state1 : torch.Tensor
<15> Adam state 1.
<16> state2 : torch.Tensor
<17> Adam state 2.
<18> beta1 : float
<19> Adam beta1.
<20> beta2 : float
<21> Adam beta2.
<22> eps : float
<23> Adam epsilon.
<24> weight_decay : float
<25> Weight decay.
<26> step : int
<27> Current optimizer step.
<28> lr : float
<29> The learning rate.
<30> qmap1 : torch.Tensor
<31> Quantization map for first Adam state.
<32> qmap2 : torch.Tensor
<33> Quantization map for second Adam state.
<34> max1 : torch.Tensor
<35> Max value for first Adam state update.
<36> max2 : torch.Tensor
<37> Max value for second Adam state update.
<38> new_max1 : torch.Tensor
<39> Max value for the next Adam update of the first state.
<40> new_max2 : torch.Tensor
<41> Max value for the next Adam update of the second state.
<42> g</s>
|
===========below chunk 0===========
<s>,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
max1: Tensor,
max2: Tensor,
new_max1: Tensor,
new_max2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
) -> None:
# offset: 1
The factor to rescale the gradient to the max clip value.
unorm_vec : torch.Tensor
The tensor for the update norm.
max_unorm : float
The maximum update norm relative to the weight norm.
"""
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit[optimizer_name][0](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(max1),
get_ptr(max2),
get_ptr(new_max1),
get_ptr(new_max2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer</s>
===========below chunk 1===========
<s>,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
max1: Tensor,
max2: Tensor,
new_max1: Tensor,
new_max2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
) -> None:
# offset: 2
<s>
)
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit[optimizer_name][1](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(max1),
get_ptr(max2),
get_ptr(new_max1),
get_ptr(new_max2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
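The qmap/max/new_max arguments above encode the non-blockwise 8-bit scheme: each optimizer state is stored as uint8 indices into a 256-entry quantization map, scaled by one absolute-maximum value per tensor (new_max1/new_max2 hold the maxima for the next step). A conceptual sketch of the dequantization side only (plain PyTorch, an assumption about the semantics rather than the CUDA kernel; it mirrors how the test above reconstructs states via F.dequantize):

def dequantize_dynamic_demo(state_uint8, qmap, absmax):
    # qmap: 256-entry code with values in [-1, 1]; absmax: per-tensor scale
    return qmap[state_uint8.long()] * absmax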
===========changed ref 0===========
<s>
g: Tensor,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
"""
Performs an inplace optimizer update with one or two optimizer states.
Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
Parameters
----------
optimizer_name : str
The name of the optimizer: {adam}.
g : torch.Tensor
Gradient tensor.
p : torch.Tensor
Parameter tensor.
state1 : torch.Tensor
Optimizer state 1.
beta1 : float
Optimizer beta1.
eps : float
Optimizer epsilon.
weight_decay : float
Weight decay.
step : int
Current optimizer step.
lr : float
The learning rate.
state2 : torch.Tensor
Optimizer state 2.
beta2 : float
Optimizer beta2.
gnorm_scale : float
The factor to rescale the gradient to the max clip value.
unorm_vec : torch.Tensor
The tensor for the update norm.
max_unorm : float
The maximum update norm relative to the weight norm.
skip_zeros : bool
Whether to skip zero-valued gradients or not (default: False).
"""
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if optimizer_name not in str2optimizer32bit:
raise NotImplementedError(
f'Optimizer not implemented: {optimizer_name}. Choices: {",".join(str2optimizer32bit.keys())}'
)
+ prev_device = pre_call(g.device)</s>
===========changed ref 1===========
<s>,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
# offset: 1
<s>join(str2optimizer32bit.keys())}'
)
+ prev_device = pre_call(g.device)
+ is_on_gpu([g, p, state1, state2, unorm_vec])
if g.dtype == torch.float32 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][0](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][1](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
</s>
|
|
bitsandbytes.functional/optimizer_update_8bit_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
2bb5c00ba9b0af840e9226a6100f2e968c0763f4
|
Added pre/post call to all lib calls. Fixes #120
|
<0>:<add> prev_device = pre_call(g.device)
<add> is_on_gpu([g, p, state1, state2, qmap1, qmap2, absmax1, absmax2])
|
<s> g: Tensor,
p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
absmax1: Tensor,
absmax2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
skip_zeros=False,
) -> None:
<0> if g.dtype == torch.float32 and state1.dtype == torch.uint8:
<1> str2optimizer8bit_blockwise[optimizer_name][0](
<2> get_ptr(p),
<3> get_ptr(g),
<4> get_ptr(state1),
<5> get_ptr(state2),
<6> ct.c_float(beta1),
<7> ct.c_float(beta2),
<8> ct.c_float(eps),
<9> ct.c_int32(step),
<10> ct.c_float(lr),
<11> get_ptr(qmap1),
<12> get_ptr(qmap2),
<13> get_ptr(absmax1),
<14> get_ptr(absmax2),
<15> ct.c_float(weight_decay),
<16> ct.c_float(gnorm_scale),
<17> ct.c_bool(skip_zeros),
<18> ct.c_int32(g.numel()),
<19> )
<20> elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
<21> str2optimizer8bit_blockwise[optimizer_name][1](
<22> get_ptr(p),
<23> get_ptr(g),
<24> get_ptr(state1),
<25> get_ptr(state2),
<26> ct.c_float(beta1),
<27> ct.c_float(beta2),
<28> ct.c_float(eps),
<29> ct.c_int32(step),
<30> ct.c_float(lr),
<31> get_ptr</s>
|
===========below chunk 0===========
<s> p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
absmax1: Tensor,
absmax2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
skip_zeros=False,
) -> None:
# offset: 1
get_ptr(qmap2),
get_ptr(absmax1),
get_ptr(absmax2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
===========changed ref 0===========
<s>
g: Tensor,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
"""
Performs an inplace optimizer update with one or two optimizer states.
Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
Parameters
----------
optimizer_name : str
The name of the optimizer: {adam}.
g : torch.Tensor
Gradient tensor.
p : torch.Tensor
Parameter tensor.
state1 : torch.Tensor
Optimizer state 1.
beta1 : float
Optimizer beta1.
eps : float
Optimizer epsilon.
weight_decay : float
Weight decay.
step : int
Current optimizer step.
lr : float
The learning rate.
state2 : torch.Tensor
Optimizer state 2.
beta2 : float
Optimizer beta2.
gnorm_scale : float
The factor to rescale the gradient to the max clip value.
unorm_vec : torch.Tensor
The tensor for the update norm.
max_unorm : float
The maximum update norm relative to the weight norm.
skip_zeros : bool
Whether to skip zero-valued gradients or not (default: False).
"""
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if optimizer_name not in str2optimizer32bit:
raise NotImplementedError(
f'Optimizer not implemented: {optimizer_name}. Choices: {",".join(str2optimizer32bit.keys())}'
)
+ prev_device = pre_call(g.device)</s>
===========changed ref 1===========
<s>,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
# offset: 1
<s>join(str2optimizer32bit.keys())}'
)
+ prev_device = pre_call(g.device)
+ is_on_gpu([g, p, state1, state2, unorm_vec])
if g.dtype == torch.float32 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][0](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][1](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
</s>
===========changed ref 2===========
<s>,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
# offset: 2
<s>.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
+ post_call(prev_device)
|
bitsandbytes.functional/percentile_clipping
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
2bb5c00ba9b0af840e9226a6100f2e968c0763f4
|
Added pre/post call to all lib calls. Fixes #120
|
<10>:<add> prev_device = pre_call(grad.device)
<27>:<add> post_call(prev_device)
|
# module: bitsandbytes.functional
def percentile_clipping(
grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int = 5
):
<0> """Applies percentile clipping
<1>
<2> grad: torch.Tensor
<3> The gradient tensor.
<4> gnorm_vec: torch.Tensor
<5> Vector of gradient norms. 100 elements expected.
<6> step: int
<7>         The current optimization steps (number of past gradient norms).
<8>
<9> """
<10> is_on_gpu([grad, gnorm_vec])
<11> if grad.dtype == torch.float32:
<12> lib.cpercentile_clipping_g32(
<13> get_ptr(grad),
<14> get_ptr(gnorm_vec),
<15> ct.c_int32(step),
<16> ct.c_int32(grad.numel()),
<17> )
<18> elif grad.dtype == torch.float16:
<19> lib.cpercentile_clipping_g16(
<20> get_ptr(grad),
<21> get_ptr(gnorm_vec),
<22> ct.c_int32(step),
<23> ct.c_int32(grad.numel()),
<24> )
<25> else:
<26> raise ValueError(f"Gradient type {grad.dtype} not supported!")
<27>
<28> current_gnorm = torch.sqrt(gnorm_vec[step % 100])
<29> vals, idx = torch.sort(gnorm_vec)
<30> clip_value = torch.sqrt(vals[percentile])
<31> gnorm_scale = 1.0
<32>
<33> if current_gnorm > clip_value:
<34> gnorm_scale = clip_value / current_gnorm
<35>
<36> return current_gnorm, clip_value, gnorm_scale
<37>
|
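To make the control flow concrete: percentile_clipping keeps a rolling window of the last 100 (squared) gradient norms in gnorm_vec, and once the current norm exceeds the norm at the requested percentile it returns a gnorm_scale below 1.0 that the optimizer updates use to rescale the gradient. A usage sketch (the training-loop scaffolding is invented for illustration; only the percentile_clipping call and the gnorm_scale argument of the optimizer updates come from this excerpt):

import torch
import bitsandbytes.functional as F

gnorm_vec = torch.zeros(100, device="cuda")  # rolling window of gradient norms
for step in range(1, 200):
    g = torch.randn(1024, 1024, device="cuda") * 0.01  # stand-in gradient
    current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(
        g, gnorm_vec, step, percentile=5
    )
    # gnorm_scale <= 1.0; pass it as the gnorm_scale argument of
    # optimizer_update_32bit / optimizer_update_8bit to clip the update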
===========changed ref 0===========
<s> g: Tensor,
p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
absmax1: Tensor,
absmax2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
skip_zeros=False,
) -> None:
+ prev_device = pre_call(g.device)
+ is_on_gpu([g, p, state1, state2, qmap1, qmap2, absmax1, absmax2])
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][0](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(absmax1),
get_ptr(absmax2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][1](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32</s>
===========changed ref 1===========
<s> p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
absmax1: Tensor,
absmax2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
skip_zeros=False,
) -> None:
# offset: 1
<s>),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(absmax1),
get_ptr(absmax2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
+ post_call(prev_device)
===========changed ref 2===========
<s>
g: Tensor,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
"""
Performs an inplace optimizer update with one or two optimizer states.
Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
Parameters
----------
optimizer_name : str
The name of the optimizer: {adam}.
g : torch.Tensor
Gradient tensor.
p : torch.Tensor
Parameter tensor.
state1 : torch.Tensor
Optimizer state 1.
beta1 : float
Optimizer beta1.
eps : float
Optimizer epsilon.
weight_decay : float
Weight decay.
step : int
Current optimizer step.
lr : float
The learning rate.
state2 : torch.Tensor
Optimizer state 2.
beta2 : float
Optimizer beta2.
gnorm_scale : float
The factor to rescale the gradient to the max clip value.
unorm_vec : torch.Tensor
The tensor for the update norm.
max_unorm : float
The maximum update norm relative to the weight norm.
skip_zeros : bool
Whether to skip zero-valued gradients or not (default: False).
"""
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if optimizer_name not in str2optimizer32bit:
raise NotImplementedError(
f'Optimizer not implemented: {optimizer_name}. Choices: {",".join(str2optimizer32bit.keys())}'
)
+ prev_device = pre_call(g.device)</s>
===========changed ref 3===========
<s>,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
# offset: 1
<s>join(str2optimizer32bit.keys())}'
)
+ prev_device = pre_call(g.device)
+ is_on_gpu([g, p, state1, state2, unorm_vec])
if g.dtype == torch.float32 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][0](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][1](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
</s>
|
bitsandbytes.functional/spmm_coo_very_sparse
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
2bb5c00ba9b0af840e9226a6100f2e968c0763f4
|
Added pre/post call to all lib calls. Fixes #120
|
<5>:<add> prev_device = pre_call(B.device)
|
# module: bitsandbytes.functional
def spmm_coo_very_sparse(cooA, B, dequant_stats=None, out=None):
<0> if out is None:
<1> out = torch.zeros(
<2> (cooA.rows, B.shape[1]), device=B.device, dtype=cooA.values.dtype
<3> )
<4> nnz = cooA.nnz
<5> assert cooA.rowidx.numel() == nnz
<6> assert cooA.colidx.numel() == nnz
<7> assert cooA.values.numel() == nnz
<8> assert cooA.cols == B.shape[0], f"{cooA.cols} vs {B.shape}"
<9>
<10> transposed_B = False if B.is_contiguous() else True
<11>
<12> ldb = B.stride()[(1 if transposed_B else 0)]
<13> ldc = B.shape[1]
<14>
<15> values, counts = torch.unique(cooA.rowidx, return_counts=True)
<16> offset = counts.cumsum(0).int()
<17> max_count, max_idx = torch.sort(counts, descending=True)
<18> max_idx = max_idx.int()
<19> max_count = max_count.int()
<20> assert (
<21> max_count[0] <= 32
<22> ), f"Current max count per row is 8 but found {max_count[0]}."
<23> assert B.dtype in [torch.float16, torch.int8]
<24> ptrOffset = get_ptr(offset)
<25> ptrMaxCount = get_ptr(max_count)
<26> ptrMaxIdx = get_ptr(max_idx)
<27>
<28> ptrRowidx = get_ptr(cooA.rowidx)
<29> ptrColidx = get_ptr(cooA.colidx)
<30> ptrValues = get_ptr(cooA.values)
<31> ptrB = get_ptr(B)
<32> ptrC = get_ptr(out)
<33> ptrDequant</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def spmm_coo_very_sparse(cooA, B, dequant_stats=None, out=None):
# offset: 1
cnnz_rows = ct.c_int32(counts.numel())
cnnz = ct.c_int32(cooA.nnz)
crowsA = ct.c_int32(cooA.rows)
ccolsA = ct.c_int32(cooA.cols)
crowsB = ct.c_int32(B.shape[1])
ccolsB = ct.c_int32(B.shape[1])
cldb = ct.c_int32(ldb)
cldc = ct.c_int32(ldc)
# print(cooA.rowidx[:64])
# print(cooA.colidx[:64].sort()[0])
is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out, dequant_stats])
if B.dtype == torch.float16:
lib.cspmm_coo_very_sparse_naive_fp16(
ptrMaxCount,
ptrMaxIdx,
ptrOffset,
ptrRowidx,
ptrColidx,
ptrValues,
ptrB,
ptrC,
ptrDequantStats,
cnnz_rows,
cnnz,
crowsA,
crowsB,
ccolsB,
)
elif B.dtype == torch.int8:
lib.cspmm_coo_very_sparse_naive_int8(
ptrMaxCount,
ptrMaxIdx,
ptrOffset,
ptrRowidx,
ptrColidx,
ptrValues,
ptrB,
ptrC,
ptrDequantStats,
cnnz_rows,
cnnz,
crowsA,
crowsB,
ccolsB,
)
# else: assertion error
return out
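Since only the pointer plumbing and launch logic are visible here, a reference formulation of what the kernel computes may help (a naive dense-loop sketch, ignoring the int8/dequant_stats path; the cooA field names are taken from the asserts above, everything else is illustrative):

def spmm_coo_reference(rowidx, colidx, values, rows, B):
    # out[r, :] accumulates values[i] * B[colidx[i], :] for every nonzero
    # (rowidx[i], colidx[i]) of the sparse COO matrix
    out = torch.zeros(rows, B.shape[1], dtype=values.dtype, device=B.device)
    for r, c, v in zip(rowidx.tolist(), colidx.tolist(), values.tolist()):
        out[r] += v * B[c].to(values.dtype)
    return out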
===========changed ref 0===========
# module: bitsandbytes.functional
def percentile_clipping(
grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int = 5
):
"""Applies percentile clipping
grad: torch.Tensor
The gradient tensor.
gnorm_vec: torch.Tensor
Vector of gradient norms. 100 elements expected.
step: int
The current optimization steps (number of past gradient norms).
"""
+ prev_device = pre_call(grad.device)
is_on_gpu([grad, gnorm_vec])
if grad.dtype == torch.float32:
lib.cpercentile_clipping_g32(
get_ptr(grad),
get_ptr(gnorm_vec),
ct.c_int32(step),
ct.c_int32(grad.numel()),
)
elif grad.dtype == torch.float16:
lib.cpercentile_clipping_g16(
get_ptr(grad),
get_ptr(gnorm_vec),
ct.c_int32(step),
ct.c_int32(grad.numel()),
)
else:
raise ValueError(f"Gradient type {grad.dtype} not supported!")
+ post_call(prev_device)
current_gnorm = torch.sqrt(gnorm_vec[step % 100])
vals, idx = torch.sort(gnorm_vec)
clip_value = torch.sqrt(vals[percentile])
gnorm_scale = 1.0
if current_gnorm > clip_value:
gnorm_scale = clip_value / current_gnorm
return current_gnorm, clip_value, gnorm_scale
===========changed ref 1===========
<s> g: Tensor,
p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
absmax1: Tensor,
absmax2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
skip_zeros=False,
) -> None:
+ prev_device = pre_call(g.device)
+ is_on_gpu([g, p, state1, state2, qmap1, qmap2, absmax1, absmax2])
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][0](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(absmax1),
get_ptr(absmax2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][1](
get_ptr(p),
get_ptr(g),
get_ptr(state1),
get_ptr(state2),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32</s>
===========changed ref 2===========
<s> p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
absmax1: Tensor,
absmax2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
skip_zeros=False,
) -> None:
# offset: 1
<s>),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(absmax1),
get_ptr(absmax2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
+ post_call(prev_device)
|
bitsandbytes.cuda_setup.main/CUDASetup.run_cuda_setup
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4cd63deff3b3cdc923e151c4efdaa281fa42d668
|
Fixed CUDA Conda PyTorch 2.0 issues.
|
<11>:<add>
<add> print('bin', binary_path)
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
<0> self.initialized = True
<1> self.cuda_setup_log = []
<2>
<3> binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
<4> self.cudart_path = cudart_path
<5> self.cuda = cuda
<6> self.cc = cc
<7> self.cuda_version_string = cuda_version_string
<8>
<9> package_dir = Path(__file__).parent.parent
<10> binary_path = package_dir / binary_name
<11>
<12> try:
<13> if not binary_path.exists():
<14> self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
<15> legacy_binary_name = "libbitsandbytes_cpu.so"
<16> self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
<17> binary_path = package_dir / legacy_binary_name
<18> if not binary_path.exists() or torch.cuda.is_available():
<19> self.add_log_entry('')
<20> self.add_log_entry('='*48 + 'ERROR' + '='*37)
<21> self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
<22> self.add_log_entry('1. CUDA driver not installed')
<23> self.add_log_entry('2. CUDA not installed')
<24> self.add_log_entry('3. You have multiple conflicting CUDA libraries')
<25> self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
<26> self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
<27> </s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
self.print_log_stack()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(binary_path)
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
self.print_log_stack()
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
+ # these are the most common libs names
+ # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
+ # we have libcudart.so.11.0 which causes a lot of errors before
+ # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
- CUDA_RUNTIME_LIB: str = "libcudart.so"
+ # this is an ordered list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
+ backup_paths = []
+ backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
+
|
bitsandbytes.cuda_setup.main/get_cuda_runtime_lib_paths
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4cd63deff3b3cdc923e151c4efdaa281fa42d668
|
Fixed CUDA Conda PyTorch 2.0 issues.
|
<0>:<add> paths = set()
<add> for libname in CUDA_RUNTIME_LIBS:
<del> return {
<1>:<del> path / CUDA_RUNTIME_LIB
<2>:<add> for path in candidate_paths:
<del> for path in candidate_paths
<3>:<add> if (path / libname).is_file():
<add> paths.add(path / libname)
<add> return paths
<del> if (path / CUDA_RUNTIME_LIB).is_file()
<4>:<del> }
|
# module: bitsandbytes.cuda_setup.main
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
<0> return {
<1> path / CUDA_RUNTIME_LIB
<2> for path in candidate_paths
<3> if (path / CUDA_RUNTIME_LIB).is_file()
<4> }
<5>
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
+ # these are the most common libs names
+ # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
+ # we have libcudart.so.11.0 which causes a lot of errors before
+ # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
- CUDA_RUNTIME_LIB: str = "libcudart.so"
+ # this is an ordered list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
+ backup_paths = []
+ backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
+
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
self.cuda = cuda
self.cc = cc
self.cuda_version_string = cuda_version_string
package_dir = Path(__file__).parent.parent
binary_path = package_dir / binary_name
+
+ print('bin', binary_path)
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
legacy_binary_name = "libbitsandbytes_cpu.so"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
self.add_log_entry('1. CUDA driver not installed')
self.add_log_entry('2. CUDA not installed')
self.add_log_entry('3. You have multiple conflicting CUDA libraries')
self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CU</s>
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
<s> example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
- self.print_log_stack()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(binary_path)
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
- self.print_log_stack()
|
bitsandbytes.cuda_setup.main/determine_cuda_runtime_lib_path
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4cd63deff3b3cdc923e151c4efdaa281fa42d668
|
Fixed CUDA Conda PyTorch 2.0 issues.
|
# module: bitsandbytes.cuda_setup.main
def determine_cuda_runtime_lib_path() -> Union[Path, None]:
<0> """
<1>     Searches for a CUDA installation, in the following order of priority:
<2> 1. active conda env
<3> 2. LD_LIBRARY_PATH
<4> 3. any other env vars, while ignoring those that
<5> - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
<6> - don't contain the path separator `/`
<7>
<8> If multiple libraries are found in part 3, we optimistically try one,
<9> while giving a warning message.
<10> """
<11> candidate_env_vars = get_potentially_lib_path_containing_env_vars()
<12>
<13> if "CONDA_PREFIX" in candidate_env_vars:
<14> conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"
<15>
<16> conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
<17> warn_in_case_of_duplicates(conda_cuda_libs)
<18>
<19> if conda_cuda_libs:
<20> return next(iter(conda_cuda_libs))
<21>
<22> CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
<23> f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
<24>
<25> if "LD_LIBRARY_PATH" in candidate_env_vars:
<26> lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])
<27>
<28> if lib_ld_cuda_libs:
<29> return next(iter(lib_ld_cuda_libs))
<30> warn_in_case_of_duplicates(lib_ld_cuda_libs)
<31>
<32> CUDASetup.get_instance().add_</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
def determine_cuda_runtime_lib_path() -> Union[Path, None]:
# offset: 1
f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
remaining_candidate_env_vars = {
env_var: value for env_var, value in candidate_env_vars.items()
if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
}
cuda_runtime_libs = set()
for env_var, value in remaining_candidate_env_vars.items():
cuda_runtime_libs.update(find_cuda_lib_in(value))
if len(cuda_runtime_libs) == 0:
CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching /usr/local/cuda/lib64...')
cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64'))
warn_in_case_of_duplicates(cuda_runtime_libs)
return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
+ paths = set()
+ for libname in CUDA_RUNTIME_LIBS:
- return {
- path / CUDA_RUNTIME_LIB
+ for path in candidate_paths:
- for path in candidate_paths
+ if (path / libname).is_file():
+ paths.add(path / libname)
+ return paths
- if (path / CUDA_RUNTIME_LIB).is_file()
- }
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
+ # these are the most common libs names
+ # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
+ # we have libcudart.so.11.0 which causes a lot of errors before
+ # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
- CUDA_RUNTIME_LIB: str = "libcudart.so"
+ # this is an ordered list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
+ backup_paths = []
+ backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
+
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
self.cuda = cuda
self.cc = cc
self.cuda_version_string = cuda_version_string
package_dir = Path(__file__).parent.parent
binary_path = package_dir / binary_name
+
+ print('bin', binary_path)
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
legacy_binary_name = "libbitsandbytes_cpu.so"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
self.add_log_entry('1. CUDA driver not installed')
self.add_log_entry('2. CUDA not installed')
self.add_log_entry('3. You have multiple conflicting CUDA libraries')
self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CU</s>
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
<s> example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
- self.print_log_stack()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(binary_path)
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
- self.print_log_stack()
|
|
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
89e3b82731db66eb4bb0c0690f1f623c8ef6df65
|
Added more detailed cuda setup debug and debugging instructions.
|
<3>:<add> print(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
<add> ('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
<del> print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')
|
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
<0> if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
<1> print('')
<2> print('='*35 + 'BUG REPORT' + '='*35)
<3> print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')
<4> print('='*80)
<5> if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None, None
<6>
<7> cuda_setup = CUDASetup.get_instance()
<8> cudart_path = determine_cuda_runtime_lib_path()
<9> cuda = get_cuda_lib_handle()
<10> cc = get_compute_capability(cuda)
<11> cuda_version_string = get_cuda_version(cuda, cudart_path)
<12>
<13> failure = False
<14> if cudart_path is None:
<15> failure = True
<16> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True)
<17> else:
<18> cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
<19>
<20> if cc == '' or cc is None:
<21> failure = True
<22> cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library...", is_warning=True)
<23> else:
<24> cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
<25>
<26> if cuda is None:
<27> failure = True
<28> else:
<29> cuda_setup.add_log_entry(f'CUDA</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
# offset: 1
# 7.5 is the minimum CC for cublaslt
has_cublaslt = is_cublasLt_compatible(cc)
# TODO:
# (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
# (2) Multiple CUDA versions installed
# we use ls -l instead of nvcc to determine the cuda version
# since most installations will have the libcudart.so installed, but not the compiler
if failure:
binary_name = "libbitsandbytes_cpu.so"
elif has_cublaslt:
binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so"
else:
"if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"
return binary_name, cudart_path, cuda, cc, cuda_version_string
|
bitsandbytes.cuda_setup.main/warn_in_case_of_duplicates
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
659a7dfc7165b166b7972250c39daa0b90ad501d
|
Fixing #300.
|
<2>:<add> f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}.. "
<del> f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. "
<7>:<add> f"{CUDA_RUNTIME_LIBS} in the paths that we search based on your env.")
<del> f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env.")
|
# module: bitsandbytes.cuda_setup.main
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
<0> if len(results_paths) > 1:
<1> warning_msg = (
<2> f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. "
<3> "We'll flip a coin and try one of these, in order to fail forward.\n"
<4> "Either way, this might cause trouble in the future:\n"
<5> "If you get `CUDA error: invalid device function` errors, the above "
<6> "might be the cause and the solution is to make sure only one "
<7> f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env.")
<8> CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
<9>
|
===========unchanged ref 0===========
at: bitsandbytes.cuda_setup.main
CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
CUDASetup()
at: bitsandbytes.cuda_setup.main.CUDASetup
_instance = None
get_instance()
at: pathlib
Path()
at: typing
Set = _alias(set, 1, inst=False, name='Set')
|
bitsandbytes.cuda_setup.main/determine_cuda_runtime_lib_path
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
659a7dfc7165b166b7972250c39daa0b90ad501d
|
Fixing #300.
|
<23>:<add> f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
<del> f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
|
# module: bitsandbytes.cuda_setup.main
def determine_cuda_runtime_lib_path() -> Union[Path, None]:
<0> """
<1>     Searches for a CUDA installation, in the following order of priority:
<2> 1. active conda env
<3> 2. LD_LIBRARY_PATH
<4> 3. any other env vars, while ignoring those that
<5> - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
<6> - don't contain the path separator `/`
<7>
<8> If multiple libraries are found in part 3, we optimistically try one,
<9> while giving a warning message.
<10> """
<11> candidate_env_vars = get_potentially_lib_path_containing_env_vars()
<12>
<13> if "CONDA_PREFIX" in candidate_env_vars:
<14> conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"
<15>
<16> conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
<17> warn_in_case_of_duplicates(conda_cuda_libs)
<18>
<19> if conda_cuda_libs:
<20> return next(iter(conda_cuda_libs))
<21>
<22> CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
<23> f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
<24>
<25> if "LD_LIBRARY_PATH" in candidate_env_vars:
<26> lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])
<27>
<28> if lib_ld_cuda_libs:
<29> return next(iter(lib_ld_cuda_libs))
<30> warn_in_case_of_duplicates(lib_ld_cuda_libs)
<31>
<32> CUDASetup.get_instance().add_</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
def determine_cuda_runtime_lib_path() -> Union[Path, None]:
# offset: 1
f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True)
remaining_candidate_env_vars = {
env_var: value for env_var, value in candidate_env_vars.items()
if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
}
cuda_runtime_libs = set()
for env_var, value in remaining_candidate_env_vars.items():
cuda_runtime_libs.update(find_cuda_lib_in(value))
if len(cuda_runtime_libs) == 0:
CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching in backup paths...')
cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64'))
warn_in_case_of_duplicates(cuda_runtime_libs)
return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
===========unchanged ref 0===========
at: bitsandbytes.cuda_setup.env_vars
get_potentially_lib_path_containing_env_vars() -> Dict[str, str]
at: bitsandbytes.cuda_setup.main
CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
CUDASetup()
find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]
warn_in_case_of_duplicates(results_paths: Set[Path]) -> None
at: bitsandbytes.cuda_setup.main.CUDASetup
get_instance()
at: pathlib
Path()
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
if len(results_paths) > 1:
warning_msg = (
+ f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}.. "
- f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. "
"We'll flip a coin and try one of these, in order to fail forward.\n"
"Either way, this might cause trouble in the future:\n"
"If you get `CUDA error: invalid device function` errors, the above "
"might be the cause and the solution is to make sure only one "
+ f"{CUDA_RUNTIME_LIBS} in the paths that we search based on your env.")
- f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env.")
CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
|
bitsandbytes.functional/create_fp8_map
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
7140c014058f9fa2a75ff315af6af9a59efcfe5f
|
Merge branch 'main' into fp8_merge
|
<14>:<add> bias = 2**(exponent_bits-1)
<del> bias = 2**(exponent_bits-1)-1
<22>:<add> value = value*2**-(bias)
<del> value = value*2**-(bias-1)
<25>:<add> value = value*2**-(evalue-bias-1)
<del> value = value*2**-(evalue-bias-2)
|
# module: bitsandbytes.functional
def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8):
<0> e = exponent_bits
<1> p = precision_bits
<2> has_sign = 1 if signed else 0
<3> assert e+p == total_bits-has_sign
<4> # the exponent is biased to 2^(e-1) -1 == 0
<5> evalues = []
<6> pvalues = []
<7> for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 2**(exponent_bits-has_sign), 1)):
<8> evalues.append(2**val)
<9>
<10>
<11> values = []
<12> lst = list(itertools.product([0, 1], repeat=precision_bits))
<13> #for ev in evalues:
<14> bias = 2**(exponent_bits-1)-1
<15> for evalue in range(2**(exponent_bits)):
<16> for bit_pattern in lst:
<17> value = (1 if evalue != 0 else 0)
<18> for i, pval in enumerate(list(bit_pattern)):
<19> value += pval*(2**-(i+1))
<20> if evalue == 0:
<21> # subnormals
<22> value = value*2**-(bias-1)
<23> else:
<24> # normals
<25> value = value*2**-(evalue-bias-2)
<26> values.append(value)
<27> if signed:
<28> values.append(-value)
<29>
<30>
<31> assert len(values) == 2**total_bits
<32> values.sort()
<33> if total_bits < 8:
<34> gap = 256 - len(values)
<35> for i in range(gap):
<36> values.append(0)
<37> values.sort()
<38> code = torch.Tensor(values)
<39> code /= code.max()
<40>
<41> return code
<42>
|
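As a quick sanity check of what create_fp8_map returns, the following properties follow directly from the code above (e4m3-style arguments are chosen only for illustration, and the module is assumed to be importable as bitsandbytes.functional):

import torch
import bitsandbytes.functional as F

code = F.create_fp8_map(signed=True, exponent_bits=4, precision_bits=3, total_bits=8)
assert code.numel() == 256                 # one entry per 8-bit pattern
assert code.max().item() == 1.0            # normalized by the largest magnitude
assert code.min().item() == -1.0           # signed map is symmetric around zero
assert torch.all(code[1:] >= code[:-1])    # entries are sorted ascending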
===========changed ref 0===========
+ # module: bitsandbytes.nn.triton_based_modules
+
+
===========changed ref 1===========
+ # module: bitsandbytes.triton.int8_matmul_rowwise_dequantize
+
+
===========changed ref 2===========
+ # module: bitsandbytes.triton
+
+
===========changed ref 3===========
+ # module: bitsandbytes.triton.quantize_global
+
+
===========changed ref 4===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequanitze
+
+
===========changed ref 5===========
+ # module: bitsandbytes.triton.dequantize_rowwise
+
+
===========changed ref 6===========
+ # module: bitsandbytes.triton.quantize_columnwise_and_transpose
+
+
===========changed ref 7===========
+ # module: bitsandbytes.triton.quantize_rowwise
+
+
===========changed ref 8===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ _instance = None
+
===========changed ref 9===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def is_initialized(self):
+ return getattr(self, 'initialized', False)
+
===========changed ref 10===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 11===========
+ # module: bitsandbytes.triton.int8_matmul_rowwise_dequantize
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and columnwise quantized weight
+ # Its purpose is fused matmul then dequantize
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 12===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequanitze
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and global quantized weight
+ # Its purpose is fused matmul then dequantize
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 13===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def get_hvalue(self, weight):
+ return weight.data.storage().data_ptr()
+
===========changed ref 14===========
+ # module: bitsandbytes.nn.triton_based_modules
+ class StandardLinear(nn.Linear):
+ def forward(self, x):
+ return StandardLinearFunction.apply(x, self.weight, self.bias)
+
===========changed ref 15===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def quantize_weight(self, w, outlier_idx):
+ raise NotImplementedError('Please override the `quantize_weights(self, w, outlier_idx)` function')
+
===========changed ref 16===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def forward_with_outliers(self, x, outlier_idx):
+ raise NotImplementedError('Please override the `forward_with_outliers(self, x, outlier_idx)` function')
+
===========changed ref 17===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ return cls._instance
+
===========changed ref 18===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True):
+ super().__init__(input_features, output_features, bias)
+ self.outlier_dim = None
+ self.is_quantized = False
+
===========changed ref 19===========
# module: bitsandbytes.nn.modules
+ class SwitchBackLinearBnb(nn.Linear):
+ def init_8bit_state(self):
+ self.state.CB = self.weight.CB
+ self.state.SCB = self.weight.SCB
+ self.weight.CB = None
+ self.weight.SCB = None
+
===========changed ref 20===========
+ # module: bitsandbytes.nn.triton_based_modules
+ SwitchBackLinearGlobal = partial(SwitchBackLinear, vectorize=False)
+ SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vectorize=False, mem_efficient=True)
+ SwitchBackLinearVectorized = partial(SwitchBackLinear, vectorize=True)
+
===========changed ref 21===========
# module: bitsandbytes.nn.modules
+ class SwitchBackLinearBnb(nn.Linear):
+ def forward(self, x):
+ self.state.is_training = self.training
+
+ if self.weight.CB is not None:
+ self.init_8bit_state()
+
+ out = bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias
+
===========changed ref 22===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def get_outliers(self, weight):
+ if not self.is_initialized():
+ print('Outlier tracer is not initialized...')
+ return None
+ hvalue = self.get_hvalue(weight)
+ if hvalue in self.hvalue2outlier_idx:
+ return self.hvalue2outlier_idx[hvalue]
+ else:
+ return None
+
===========changed ref 23===========
+ # module: bitsandbytes.nn.triton_based_modules
+ # This is just the standard linear function.
+ class StandardLinearFunction(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, input, weight, bias=None):
+ X = input.view(-1, input.size(-1))
+
+ ctx.save_for_backward(X, weight, bias)
+ output = input.matmul(weight.t())
+ if bias is not None:
+ output += bias.unsqueeze(0).expand_as(output)
+ return output.view(*input.size()[:-1], -1)
+
===========changed ref 24===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def initialize(self, model):
+ self.last_w = None
+ self.current_outlier_dims = None
+ self.hvalues = []
+ self.outliers = []
+ self.hvalue2outlier_idx = {}
+ self.initialized = True
+ self.hooks = []
+
+ for n, m in model.named_modules():
+ if isinstance(m, torch.nn.Linear):
+ self.hooks.append(m.register_forward_pre_hook(outlier_hook))
+
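Taken together, these refs describe a singleton tracer: `get_instance()` returns the shared object, `initialize(model)` hooks every `nn.Linear`, and `get_outliers(weight)` looks up recorded outlier dimensions by the weight's storage pointer. A hedged usage sketch (what gets recorded depends on `outlier_hook`, which is not shown here):

import torch
from bitsandbytes.utils import OutlierTracer

model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.Linear(64, 64))
tracer = OutlierTracer.get_instance()
tracer.initialize(model)                     # registers forward pre-hooks on each nn.Linear
_ = model(torch.randn(4, 64))                # hooks may record outlier dims during this pass
print(tracer.get_outliers(model[0].weight))  # None if nothing was recorded for this weight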
===========changed ref 25===========
<s>iton.Config({}, num_warps=4),
+ triton.Config({}, num_warps=8),
+ ],
+ key=['n_elements']
+ )
+ @triton.jit
+ def _dequantize_rowwise(
+ x_ptr,
+ state_x,
+ output_ptr,
+ inv_127,
+ n_elements,
+ BLOCK_SIZE: tl.constexpr,
+ P2: tl.constexpr,
+ ):
+ pid = tl.program_id(axis=0)
+ block_start = pid * BLOCK_SIZE
+ arange = tl.arange(0, P2)
+ offsets = block_start + arange
+ row_mask = arange < BLOCK_SIZE
+ x = tl.load(x_ptr + offsets, mask=row_mask)
+ max_val = tl.load(state_x + pid)
+ output = max_val * x * inv_127
+ tl.store(output_ptr + offsets, output, mask=row_mask)
+
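A plain-PyTorch reference for what this kernel computes may help when reading it: each row of int8 codes is rescaled by that row's absmax divided by 127 (the `inv_127` argument is assumed to be 1/127). Sketch only, not the Triton entry point:

import torch

def dequantize_rowwise_ref(x_int8, state_x):
    # state_x holds one absmax value per row
    return x_int8.float() * state_x.unsqueeze(1).float() / 127.0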
|
bitsandbytes.functional/quantize_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
7140c014058f9fa2a75ff315af6af9a59efcfe5f
|
Merge branch 'main' into fp8_merge
|
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor:
<0> """
<1> Quantize tensor A in blocks of size 4096 values.
<2>
<3> Quantizes tensor A by dividing it into blocks of 4096 values.
<4> Then the absolute maximum value within these blocks is calculated
<5> for the non-linear quantization.
<6>
<7> Parameters
<8> ----------
<9> A : torch.Tensor
<10> The input tensor.
<11> code : torch.Tensor
<12> The quantization map.
<13> absmax : torch.Tensor
<14> The absmax values.
<15> rand : torch.Tensor
<16> The tensor for stochastic rounding.
<17> out : torch.Tensor
<18> The output tensor (8-bit).
<19>
<20> Returns
<21> -------
<22> torch.Tensor:
<23> The 8-bit tensor.
<24> tuple(torch.Tensor, torch.Tensor):
<25> The quantization state to undo the quantization.
<26> """
<27>
<28>
<29> if code is None:
<30> if "dynamic" not in name2qmap:
<31> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<32> code = name2qmap["dynamic"]
<33>
<34> if absmax is None:
<35> n = A.numel()
<36> blocks = n // blocksize
<37> blocks += 1 if n % blocksize > 0 else 0
<38> absmax = torch.zeros((blocks,), device=A.device)
<39>
<40> if out is None:
<41> out = torch.zeros_like(A, dtype=torch.uint8)
<42>
<43> if A.device.type != 'cpu':
<44> assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
<45> cblocksize = ct.c_int32(blocksize)
<46> prev_device = pre_call(A.device)
<47> code = code.to(A.device)
<48> if rand is</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor:
# offset: 1
is_on_gpu([code, A, out, absmax, rand])
assert blocksize==4096
assert rand.numel() >= 1024
rand_offset = random.randint(0, 1023)
if A.dtype == torch.float32:
lib.cquantize_blockwise_stochastic_fp32(get_ptr(code), get_ptr(A),get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_stochastic_fp16(get_ptr(code), get_ptr(A),get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
else:
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor:
# offset: 2
<s> 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
assert rand is None
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out, (absmax, code)
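A short round-trip example of the API above, following the same call pattern as the tests later in this file (values are illustrative):

import torch
import bitsandbytes.functional as F

A = torch.randn(1024, 1024, device='cuda')
C, state = F.quantize_blockwise(A, blocksize=4096)       # uint8 codes plus (absmax, code) state
A_dq = F.dequantize_blockwise(C, state, blocksize=4096)
print((A - A_dq).abs().mean().item())                    # small blockwise quantization error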
===========changed ref 0===========
# module: bitsandbytes.functional
def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8):
e = exponent_bits
p = precision_bits
has_sign = 1 if signed else 0
assert e+p == total_bits-has_sign
# the exponent is biased to 2^(e-1) -1 == 0
evalues = []
pvalues = []
for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 2**(exponent_bits-has_sign), 1)):
evalues.append(2**val)
values = []
lst = list(itertools.product([0, 1], repeat=precision_bits))
#for ev in evalues:
+ bias = 2**(exponent_bits-1)
- bias = 2**(exponent_bits-1)-1
for evalue in range(2**(exponent_bits)):
for bit_pattern in lst:
value = (1 if evalue != 0 else 0)
for i, pval in enumerate(list(bit_pattern)):
value += pval*(2**-(i+1))
if evalue == 0:
# subnormals
+ value = value*2**-(bias)
- value = value*2**-(bias-1)
else:
# normals
+ value = value*2**-(evalue-bias-1)
- value = value*2**-(evalue-bias-2)
values.append(value)
if signed:
values.append(-value)
assert len(values) == 2**total_bits
values.sort()
if total_bits < 8:
gap = 256 - len(values)
for i in range(gap):
values.append(0)
values.sort()
code = torch.Tensor(values)
code /= code.max()
return code
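To see the effect of this bias change, the map can be built and inspected directly; the call pattern matches the tests below, and no specific values are asserted here:

import bitsandbytes.functional as F

code = F.create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8)
assert code.numel() == 256 and code.max().item() == 1.0   # normalized 256-entry table
print(code[126:130])                                      # entries near the middle of the sorted table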
===========changed ref 1===========
+ # module: bitsandbytes.nn.triton_based_modules
+
+
===========changed ref 2===========
+ # module: bitsandbytes.triton.int8_matmul_rowwise_dequantize
+
+
===========changed ref 3===========
+ # module: bitsandbytes.triton
+
+
===========changed ref 4===========
+ # module: bitsandbytes.triton.quantize_global
+
+
===========changed ref 5===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequanitze
+
+
===========changed ref 6===========
+ # module: bitsandbytes.triton.dequantize_rowwise
+
+
===========changed ref 7===========
+ # module: bitsandbytes.triton.quantize_columnwise_and_transpose
+
+
===========changed ref 8===========
+ # module: bitsandbytes.triton.quantize_rowwise
+
+
===========changed ref 9===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ _instance = None
+
===========changed ref 10===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def is_initialized(self):
+ return getattr(self, 'initialized', False)
+
===========changed ref 11===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 12===========
+ # module: bitsandbytes.triton.int8_matmul_rowwise_dequantize
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and columnwise quantized weight
+ # Its purpose is a fused matmul followed by dequantization
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 13===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequanitze
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and global quantized weight
+ # Its purpose is a fused matmul followed by dequantization
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 14===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def get_hvalue(self, weight):
+ return weight.data.storage().data_ptr()
+
===========changed ref 15===========
+ # module: bitsandbytes.nn.triton_based_modules
+ class StandardLinear(nn.Linear):
+ def forward(self, x):
+ return StandardLinearFunction.apply(x, self.weight, self.bias)
+
===========changed ref 16===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def quantize_weight(self, w, outlier_idx):
+        raise NotImplementedError('Please override the `quantize_weight(self, w, outlier_idx)` function')
+
===========changed ref 17===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def forward_with_outliers(self, x, outlier_idx):
+ raise NotImplementedError('Please override the `forward_with_outliers(self, x, outlier_idx)` function')
+
===========changed ref 18===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ return cls._instance
+
|
|
tests.test_functional/test_dynamic_blockwise_stochastic_quantization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
7140c014058f9fa2a75ff315af6af9a59efcfe5f
|
Merge branch 'main' into fp8_merge
|
<3>:<add> err = 0
<5>:<add> C1, S1 = F.quantize_blockwise(A1, rand=rand, blocksize=blocksize)
<del> C1, S1 = F.quantize_blockwise(A1, rand=rand)
<6>:<add> C2, S2 = F.quantize_blockwise(A1, blocksize=blocksize)
<del> C2, S2 = F.quantize_blockwise(A1)
<7>:<add> A2 = F.dequantize_blockwise(C1, S1, blocksize=blocksize)
<add> err += (A1-A2).abs().mean().item()/100
<11>:<del> torch.testing.assert_allclose(
<12>:<add> torch.testing.assert_allclose(fraction_larger, fraction_smaller, atol=0.01, rtol=0)
<del> fraction_larger, fraction_smaller, atol=0.01, rtol=0
<13>:<add> assert err < 0.019
<del> )
|
<s>fs)/len(diffs))
#print('rand', blocksize, sum(reldiffs)/len(reldiffs))
+
+ @pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
+ @pytest.mark.skip("Stochastic has some bugs, but will be deprecated soon anyways.")
+ def test_dynamic_blockwise_stochastic_quantization(blocksize):
- def test_dynamic_blockwise_stochastic_quantization():
<0> diffs = []
<1> reldiffs = []
<2> rand = torch.rand(1024).cuda()
<3> for i in range(100):
<4> A1 = torch.randn(1024, 1024, device="cuda")
<5> C1, S1 = F.quantize_blockwise(A1, rand=rand)
<6> C2, S2 = F.quantize_blockwise(A1)
<7>         # a maximum distance of 1 between quantized values
<8> torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
<9> fraction_smaller = (C1 < C2).float().sum() / C1.numel()
<10> fraction_larger = (C1 > C2).float().sum() / C1.numel()
<11> torch.testing.assert_allclose(
<12> fraction_larger, fraction_smaller, atol=0.01, rtol=0
<13> )
<14>
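The property being checked is the usual one for stochastic rounding: a value rounds up with probability equal to its fractional part, so up- and down-rounding balance out and the mean stays unbiased. A standalone sketch of that property (independent of the bitsandbytes kernels):

import torch

x = torch.rand(1_000_000) * 10
frac = x - x.floor()
rounded = x.floor() + (torch.rand_like(x) < frac).float()  # stochastic rounding
print(rounded.mean().item(), x.mean().item())              # the two means should agree closely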
|
===========changed ref 0===========
+ # module: bitsandbytes.nn.triton_based_modules
+
+
===========changed ref 1===========
+ # module: bitsandbytes.triton.int8_matmul_rowwise_dequantize
+
+
===========changed ref 2===========
+ # module: bitsandbytes.triton
+
+
===========changed ref 3===========
+ # module: bitsandbytes.triton.quantize_global
+
+
===========changed ref 4===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequanitze
+
+
===========changed ref 5===========
+ # module: bitsandbytes.triton.dequantize_rowwise
+
+
===========changed ref 6===========
+ # module: bitsandbytes.triton.quantize_columnwise_and_transpose
+
+
===========changed ref 7===========
+ # module: bitsandbytes.triton.quantize_rowwise
+
+
===========changed ref 8===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ _instance = None
+
===========changed ref 9===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def is_initialized(self):
+ return getattr(self, 'initialized', False)
+
===========changed ref 10===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 11===========
+ # module: bitsandbytes.triton.int8_matmul_rowwise_dequantize
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and columnwise quantized weight
+ # Its purpose is a fused matmul followed by dequantization
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 12===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequanitze
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and global quantized weight
+ # Its purpose is a fused matmul followed by dequantization
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 13===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def get_hvalue(self, weight):
+ return weight.data.storage().data_ptr()
+
===========changed ref 14===========
+ # module: bitsandbytes.nn.triton_based_modules
+ class StandardLinear(nn.Linear):
+ def forward(self, x):
+ return StandardLinearFunction.apply(x, self.weight, self.bias)
+
===========changed ref 15===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def quantize_weight(self, w, outlier_idx):
+        raise NotImplementedError('Please override the `quantize_weight(self, w, outlier_idx)` function')
+
===========changed ref 16===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def forward_with_outliers(self, x, outlier_idx):
+ raise NotImplementedError('Please override the `forward_with_outliers(self, x, outlier_idx)` function')
+
===========changed ref 17===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ return cls._instance
+
===========changed ref 18===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True):
+ super().__init__(input_features, output_features, bias)
+ self.outlier_dim = None
+ self.is_quantized = False
+
===========changed ref 19===========
# module: bitsandbytes.nn.modules
+ class SwitchBackLinearBnb(nn.Linear):
+ def init_8bit_state(self):
+ self.state.CB = self.weight.CB
+ self.state.SCB = self.weight.SCB
+ self.weight.CB = None
+ self.weight.SCB = None
+
===========changed ref 20===========
+ # module: bitsandbytes.nn.triton_based_modules
+ SwitchBackLinearGlobal = partial(SwitchBackLinear, vectorize=False)
+ SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vectorize=False, mem_efficient=True)
+ SwitchBackLinearVectorized = partial(SwitchBackLinear, vectorize=True)
+
===========changed ref 21===========
# module: bitsandbytes.nn.modules
+ class SwitchBackLinearBnb(nn.Linear):
+ def forward(self, x):
+ self.state.is_training = self.training
+
+ if self.weight.CB is not None:
+ self.init_8bit_state()
+
+ out = bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias
+
===========changed ref 22===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def get_outliers(self, weight):
+ if not self.is_initialized():
+ print('Outlier tracer is not initialized...')
+ return None
+ hvalue = self.get_hvalue(weight)
+ if hvalue in self.hvalue2outlier_idx:
+ return self.hvalue2outlier_idx[hvalue]
+ else:
+ return None
+
===========changed ref 23===========
+ # module: bitsandbytes.nn.triton_based_modules
+ # This is just the standard linear function.
+ class StandardLinearFunction(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, input, weight, bias=None):
+ X = input.view(-1, input.size(-1))
+
+ ctx.save_for_backward(X, weight, bias)
+ output = input.matmul(weight.t())
+ if bias is not None:
+ output += bias.unsqueeze(0).expand_as(output)
+ return output.view(*input.size()[:-1], -1)
+
===========changed ref 24===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def initialize(self, model):
+ self.last_w = None
+ self.current_outlier_dims = None
+ self.hvalues = []
+ self.outliers = []
+ self.hvalue2outlier_idx = {}
+ self.initialized = True
+ self.hooks = []
+
+ for n, m in model.named_modules():
+ if isinstance(m, torch.nn.Linear):
+ self.hooks.append(m.register_forward_pre_hook(outlier_hook))
+
===========changed ref 25===========
<s>iton.Config({}, num_warps=4),
+ triton.Config({}, num_warps=8),
+ ],
+ key=['n_elements']
+ )
+ @triton.jit
+ def _dequantize_rowwise(
+ x_ptr,
+ state_x,
+ output_ptr,
+ inv_127,
+ n_elements,
+ BLOCK_SIZE: tl.constexpr,
+ P2: tl.constexpr,
+ ):
+ pid = tl.program_id(axis=0)
+ block_start = pid * BLOCK_SIZE
+ arange = tl.arange(0, P2)
+ offsets = block_start + arange
+ row_mask = arange < BLOCK_SIZE
+ x = tl.load(x_ptr + offsets, mask=row_mask)
+ max_val = tl.load(state_x + pid)
+ output = max_val * x * inv_127
+ tl.store(output_ptr + offsets, output, mask=row_mask)
+
|
tests.test_functional/test_few_bit_quant
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
7140c014058f9fa2a75ff315af6af9a59efcfe5f
|
Merge branch 'main' into fp8_merge
|
<13>:<add> print(code)
|
# module: tests.test_functional
#assert diff < 0.0075
#print(3, sum(abserr)/len(abserr))
#print(3, sum(relerr)/len(relerr))
def test_few_bit_quant():
<0> #print('')
<1> for bits in range(2, 9):
<2> #print('='*30, bits, '='*30)
<3> for method in ['linear', 'fp8', 'dynamic', 'quantile']:
<4> abserrs = []
<5> relerrs = []
<6> code = None
<7> if method == 'linear':
<8> code = F.create_linear_map(True, total_bits=bits).cuda()
<9> elif method == 'fp8':
<10> ebits = math.ceil(bits/2)
<11> pbits = bits-ebits-1
<12> code = F.create_fp8_map(True, ebits, pbits, bits).cuda()
<13> elif method == 'dynamic':
<14> code = F.create_dynamic_map(True, bits-0, bits).cuda()
<15> elif method == 'quantile':
<16> values = torch.randn(2048, 2048, device='cuda')
<17> code = F.create_quantile_map(values, bits).cuda()
<18> # for some data types we have no zero
<19> # for some data types we have one zero
<20> # for some data types we have two zeros
<21> assert torch.unique(code).numel() in [2**bits, 2**bits-1], f'bits: {bits}, method: {method}'
<22> #print(method, (code==0).sum())
<23> assert code.numel() == 256
<24> for i in range(10):
<25>
<26> values = torch.randn(1, 32, device='cuda')
<27> values /= values.abs().max()
<28> #values[values.abs() < 1e-6] += 1e-5
<29>
<30> q1 = []
<31> v1 = []
<32> for v in values[</s>
|
===========below chunk 0===========
# module: tests.test_functional
#assert diff < 0.0075
#print(3, sum(abserr)/len(abserr))
#print(3, sum(relerr)/len(relerr))
def test_few_bit_quant():
# offset: 1
idx = torch.abs(v-code).argmin()
q1.append(idx.item())
v1.append(code[idx].item())
q1 = torch.Tensor(q1).cuda()
v1 = torch.Tensor(v1).cuda()
q2, S2 = F.quantize_blockwise(values, code=code)
v2 = F.dequantize_blockwise(q2, S2)
idx = torch.isclose(q1.int(), q2.int())
err2 = torch.abs(v2-values)
abserrs.append(err2.mean().item())
relerrs.append((err2/(1e-10+values).abs()).mean().item())
if idx.sum():
# some weird cases
err1 = torch.abs(v1-values).mean()
#assert err2.mean() <= err1
else:
torch.testing.assert_allclose(q1, q2)
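The FP8 branch above splits the bit budget as roughly half exponent bits, with the remainder (minus the sign bit) used for precision; a quick enumeration of that split for each tested width, derived directly from the two lines in the test:

import math

for bits in range(2, 9):
    ebits = math.ceil(bits / 2)
    pbits = bits - ebits - 1
    print(f"{bits}-bit: {ebits} exponent bits, {pbits} precision bits, 1 sign bit")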
===========changed ref 0===========
+ # module: bitsandbytes.nn.triton_based_modules
+
+
===========changed ref 1===========
+ # module: bitsandbytes.triton.int8_matmul_rowwise_dequantize
+
+
===========changed ref 2===========
+ # module: bitsandbytes.triton
+
+
===========changed ref 3===========
+ # module: bitsandbytes.triton.quantize_global
+
+
===========changed ref 4===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequanitze
+
+
===========changed ref 5===========
+ # module: bitsandbytes.triton.dequantize_rowwise
+
+
===========changed ref 6===========
+ # module: bitsandbytes.triton.quantize_columnwise_and_transpose
+
+
===========changed ref 7===========
+ # module: bitsandbytes.triton.quantize_rowwise
+
+
===========changed ref 8===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ _instance = None
+
===========changed ref 9===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def is_initialized(self):
+ return getattr(self, 'initialized', False)
+
===========changed ref 10===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 11===========
+ # module: bitsandbytes.triton.int8_matmul_rowwise_dequantize
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and columnwise quantized weight
+ # Its purpose is a fused matmul followed by dequantization
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 12===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequanitze
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and global quantized weight
+ # Its purpose is a fused matmul followed by dequantization
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 13===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def get_hvalue(self, weight):
+ return weight.data.storage().data_ptr()
+
===========changed ref 14===========
+ # module: bitsandbytes.nn.triton_based_modules
+ class StandardLinear(nn.Linear):
+ def forward(self, x):
+ return StandardLinearFunction.apply(x, self.weight, self.bias)
+
===========changed ref 15===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def quantize_weight(self, w, outlier_idx):
+        raise NotImplementedError('Please override the `quantize_weight(self, w, outlier_idx)` function')
+
===========changed ref 16===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def forward_with_outliers(self, x, outlier_idx):
+ raise NotImplementedError('Please override the `forward_with_outliers(self, x, outlier_idx)` function')
+
===========changed ref 17===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ return cls._instance
+
===========changed ref 18===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True):
+ super().__init__(input_features, output_features, bias)
+ self.outlier_dim = None
+ self.is_quantized = False
+
===========changed ref 19===========
# module: bitsandbytes.nn.modules
+ class SwitchBackLinearBnb(nn.Linear):
+ def init_8bit_state(self):
+ self.state.CB = self.weight.CB
+ self.state.SCB = self.weight.SCB
+ self.weight.CB = None
+ self.weight.SCB = None
+
===========changed ref 20===========
+ # module: bitsandbytes.nn.triton_based_modules
+ SwitchBackLinearGlobal = partial(SwitchBackLinear, vectorize=False)
+ SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vectorize=False, mem_efficient=True)
+ SwitchBackLinearVectorized = partial(SwitchBackLinear, vectorize=True)
+
===========changed ref 21===========
# module: bitsandbytes.nn.modules
+ class SwitchBackLinearBnb(nn.Linear):
+ def forward(self, x):
+ self.state.is_training = self.training
+
+ if self.weight.CB is not None:
+ self.init_8bit_state()
+
+ out = bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias
+
===========changed ref 22===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def get_outliers(self, weight):
+ if not self.is_initialized():
+ print('Outlier tracer is not initialized...')
+ return None
+ hvalue = self.get_hvalue(weight)
+ if hvalue in self.hvalue2outlier_idx:
+ return self.hvalue2outlier_idx[hvalue]
+ else:
+ return None
+
===========changed ref 23===========
+ # module: bitsandbytes.nn.triton_based_modules
+ # This is just the standard linear function.
+ class StandardLinearFunction(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, input, weight, bias=None):
+ X = input.view(-1, input.size(-1))
+
+ ctx.save_for_backward(X, weight, bias)
+ output = input.matmul(weight.t())
+ if bias is not None:
+ output += bias.unsqueeze(0).expand_as(output)
+ return output.view(*input.size()[:-1], -1)
+
===========changed ref 24===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def initialize(self, model):
+ self.last_w = None
+ self.current_outlier_dims = None
+ self.hvalues = []
+ self.outliers = []
+ self.hvalue2outlier_idx = {}
+ self.initialized = True
+ self.hooks = []
+
+ for n, m in model.named_modules():
+ if isinstance(m, torch.nn.Linear):
+ self.hooks.append(m.register_forward_pre_hook(outlier_hook))
+
|
tests.test_functional/test_bench_dequantization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
7140c014058f9fa2a75ff315af6af9a59efcfe5f
|
Merge branch 'main' into fp8_merge
|
<1>:<add> code =F.create_fp8_map(True, 3, 0, 4).cuda()
<add> qa, SA = F.quantize_blockwise(a, code=code)
<del> qa, SA = F.quantize_blockwise(a)
<2>:<add> print(qa.max())
|
# module: tests.test_functional
def test_bench_dequantization():
<0> a = torch.rand(1024, 1024, device='cuda').half()
<1> qa, SA = F.quantize_blockwise(a)
<2>
<3> max_theoretical_mu = 1024*1024*2/1024**3/672*1000*1000
<4> #print(max_theoretical_mu)
<5>
<6> torch.cuda.synchronize()
<7> t0 = time.time()
<8> for i in range(100):
<9> F.dequantize_blockwise(qa, SA, blocksize=2048)
<10> torch.cuda.synchronize()
<11>
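The `max_theoretical_mu` constant above is the time one call would take if dequantization were purely bound by moving 2 bytes per element at an assumed 672 GB/s of memory bandwidth (the figure baked into the constant; the real peak depends on the GPU). Spelled out as a self-contained sketch:

elems = 1024 * 1024
bytes_out = elems * 2                                   # 2 bytes per element, as in the constant above
max_theoretical_us = bytes_out / 1024**3 / 672 * 1e6    # microseconds per call at 672 GB/s
print(f"bandwidth-bound lower limit: {max_theoretical_us:.2f} us per call")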
|
===========changed ref 0===========
# module: tests.test_functional
#assert diff < 0.0075
#print(3, sum(abserr)/len(abserr))
#print(3, sum(relerr)/len(relerr))
def test_few_bit_quant():
#print('')
for bits in range(2, 9):
#print('='*30, bits, '='*30)
for method in ['linear', 'fp8', 'dynamic', 'quantile']:
abserrs = []
relerrs = []
code = None
if method == 'linear':
code = F.create_linear_map(True, total_bits=bits).cuda()
elif method == 'fp8':
ebits = math.ceil(bits/2)
pbits = bits-ebits-1
code = F.create_fp8_map(True, ebits, pbits, bits).cuda()
+ print(code)
elif method == 'dynamic':
code = F.create_dynamic_map(True, bits-0, bits).cuda()
elif method == 'quantile':
values = torch.randn(2048, 2048, device='cuda')
code = F.create_quantile_map(values, bits).cuda()
# for some data types we have no zero
# for some data types we have one zero
# for some data types we have two zeros
assert torch.unique(code).numel() in [2**bits, 2**bits-1], f'bits: {bits}, method: {method}'
#print(method, (code==0).sum())
assert code.numel() == 256
for i in range(10):
values = torch.randn(1, 32, device='cuda')
values /= values.abs().max()
#values[values.abs() < 1e-6] += 1e-5
q1 = []
v1 = []
for v in values[0]:
idx = torch.abs(v-code).argmin()
q1.append(idx.</s>
===========changed ref 1===========
# module: tests.test_functional
#assert diff < 0.0075
#print(3, sum(abserr)/len(abserr))
#print(3, sum(relerr)/len(relerr))
def test_few_bit_quant():
# offset: 1
<s> v in values[0]:
idx = torch.abs(v-code).argmin()
q1.append(idx.item())
v1.append(code[idx].item())
q1 = torch.Tensor(q1).cuda()
v1 = torch.Tensor(v1).cuda()
q2, S2 = F.quantize_blockwise(values, code=code)
v2 = F.dequantize_blockwise(q2, S2)
idx = torch.isclose(q1.int(), q2.int())
err2 = torch.abs(v2-values)
abserrs.append(err2.mean().item())
relerrs.append((err2/(1e-10+values).abs()).mean().item())
if idx.sum():
# some weird cases
err1 = torch.abs(v1-values).mean()
#assert err2.mean() <= err1
else:
torch.testing.assert_allclose(q1, q2)
===========changed ref 2===========
+ # module: bitsandbytes.nn.triton_based_modules
+
+
===========changed ref 3===========
+ # module: bitsandbytes.triton.int8_matmul_rowwise_dequantize
+
+
===========changed ref 4===========
+ # module: bitsandbytes.triton
+
+
===========changed ref 5===========
+ # module: bitsandbytes.triton.quantize_global
+
+
===========changed ref 6===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequanitze
+
+
===========changed ref 7===========
+ # module: bitsandbytes.triton.dequantize_rowwise
+
+
===========changed ref 8===========
+ # module: bitsandbytes.triton.quantize_columnwise_and_transpose
+
+
===========changed ref 9===========
+ # module: bitsandbytes.triton.quantize_rowwise
+
+
===========changed ref 10===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ _instance = None
+
===========changed ref 11===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def is_initialized(self):
+ return getattr(self, 'initialized', False)
+
===========changed ref 12===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 13===========
+ # module: bitsandbytes.triton.int8_matmul_rowwise_dequantize
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and columnwise quantized weight
+ # Its purpose is a fused matmul followed by dequantization
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 14===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequanitze
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and global quantized weight
+ # Its purpose is a fused matmul followed by dequantization
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 15===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ def get_hvalue(self, weight):
+ return weight.data.storage().data_ptr()
+
===========changed ref 16===========
+ # module: bitsandbytes.nn.triton_based_modules
+ class StandardLinear(nn.Linear):
+ def forward(self, x):
+ return StandardLinearFunction.apply(x, self.weight, self.bias)
+
===========changed ref 17===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def quantize_weight(self, w, outlier_idx):
+        raise NotImplementedError('Please override the `quantize_weight(self, w, outlier_idx)` function')
+
===========changed ref 18===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def forward_with_outliers(self, x, outlier_idx):
+ raise NotImplementedError('Please override the `forward_with_outliers(self, x, outlier_idx)` function')
+
===========changed ref 19===========
# module: bitsandbytes.utils
+ class OutlierTracer(object):
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ return cls._instance
+
===========changed ref 20===========
# module: bitsandbytes.nn.modules
+ class OutlierAwareLinear(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True):
+ super().__init__(input_features, output_features, bias)
+ self.outlier_dim = None
+ self.is_quantized = False
+
===========changed ref 21===========
# module: bitsandbytes.nn.modules
+ class SwitchBackLinearBnb(nn.Linear):
+ def init_8bit_state(self):
+ self.state.CB = self.weight.CB
+ self.state.SCB = self.weight.SCB
+ self.weight.CB = None
+ self.weight.SCB = None
+
===========changed ref 22===========
+ # module: bitsandbytes.nn.triton_based_modules
+ SwitchBackLinearGlobal = partial(SwitchBackLinear, vectorize=False)
+ SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vectorize=False, mem_efficient=True)
+ SwitchBackLinearVectorized = partial(SwitchBackLinear, vectorize=True)
+
|
bitsandbytes.research.autograd._functions/switchback_bnb
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
c3d87e4435c168ab29d9ba50916ef0d3b015fb24
|
Added is_available_triton guard.
|
<3>:<add> return SwitchBackBnb.apply(A, B, out, bias, state)
<del> return MatMul8bitMixed.apply(A, B, out, bias, state)
|
# module: bitsandbytes.research.autograd._functions
def switchback_bnb(
A: tensor,
B: tensor,
out: tensor = None,
state: MatmulLtState = None,
threshold=0.0,
bias=None
):
<0> state = state or MatmulLtState()
<1> if threshold > 0.0:
<2> state.threshold = threshold
<3> return MatMul8bitMixed.apply(A, B, out, bias, state)
<4>
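A hedged call sketch for this helper, assuming the same matmul-style convention as the rest of the module (A is the fp16 activation, B the fp16 weight of shape [out, in]); this is illustrative and not taken from the repository's tests:

import torch
from bitsandbytes.research.autograd._functions import switchback_bnb

A = torch.randn(8, 16, device='cuda', dtype=torch.float16, requires_grad=True)
B = torch.randn(32, 16, device='cuda', dtype=torch.float16, requires_grad=True)
out = switchback_bnb(A, B, threshold=6.0)   # int8 forward with an outlier threshold
out.sum().backward()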
|
===========unchanged ref 0===========
at: bitsandbytes.autograd._functions
MatmulLtState(_tile_indices: Optional[torch.Tensor]=None, force_no_igemmlt: bool=False)
at: bitsandbytes.autograd._functions.MatmulLtState
_tile_indices: Optional[torch.Tensor] = None
force_no_igemmlt: bool = False
CB = None
CxB = None
SB = None
SCB = None
CxBt = None
SBt = None
CBt = None
subB = None
outlier_pool = None
has_accumulated_gradients = False
threshold = 0.0
idx = None
is_training = True
has_fp16_weights = True
memory_efficient_backward = False
use_pool = False
formatB = F.get_special_format_str()
at: bitsandbytes.research.autograd._functions
tensor = torch.Tensor
SwitchBackBnb(*args, **kwargs)
===========changed ref 0===========
# module: bitsandbytes.research.autograd._functions
- class MatMul8bitMixed(torch.autograd.Function):
- @staticmethod
- def backward(ctx, grad_output):
- if ctx.is_empty:
- bias_grad = (None if ctx.bias is None else torch.zeros_like(ctx.bias))
- return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
- req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad
- CAt, subA, A = ctx.tensors
- SCAt, idx = ctx.tensor_states
- formatB = ctx.formatB
- state = ctx.state
- grad_A = grad_B = grad_bias = None
-
- if req_gradBias:
- # compute grad_bias first before changing grad_output dtype
- grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
-
- # Cast grad_output to fp16
- if len(grad_output.shape) == 3:
- grad_output = grad_output.reshape(
- -1, grad_output.shape[-1]
- ).contiguous()
-
- Cgrad, Cgradt, SCgrad, SCgradt, coo_tensor = F.double_quant(grad_output.to(torch.float16))
-
- if req_gradB:
- # print('back A shape', A.shape)
- # print('grad output t shape', grad_output.t().shape)
- grad_B = torch.matmul(grad_output.t(), A)
-
- if req_gradA:
- if state.CBt is not None:
- C32grad, Sgrad = F.transform(Cgrad, "col32")
- if state.CxBt is None:
- state.CxBt, state.SBt = F.transform(
- state.CBt, to_</s>
===========changed ref 1===========
# module: bitsandbytes.research.autograd._functions
- class MatMul8bitMixed(torch.autograd.Function):
- @staticmethod
- def backward(ctx, grad_output):
# offset: 1
<s> None:
- state.CxBt, state.SBt = F.transform(
- state.CBt, to_order=formatB, transpose=True
- )
- # print('back B shape', state.CxBt.shape)
- # print('back grad shape', C32grad.shape)
- gradA32, SgradA32 = F.igemmlt(C32grad, state.CxBt, Sgrad, state.SBt)
- grad_A = F.mm_dequant(gradA32, SgradA32, SCgrad, state.SCBt).view(ctx.grad_shape).to(ctx.dtype_A)
-
- elif state.CB is not None:
- CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1. / 127.0))
- grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)
- else:
- raise Exception('State must contain either CBt or CB matrix for backward')
-
- return grad_A, grad_B, None, grad_bias, None
-
===========changed ref 2===========
# module: bitsandbytes.research.autograd._functions
- class MatMul8bitMixed(torch.autograd.Function):
- @staticmethod
- def backward(ctx, grad_output):
- if ctx.is_empty:
- bias_grad = (None if ctx.bias is None else torch.zeros_like(ctx.bias))
- return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
- req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad
- CAt, subA, A = ctx.tensors
- SCAt, idx = ctx.tensor_states
- formatB = ctx.formatB
- state = ctx.state
- grad_A = grad_B = grad_bias = None
-
- if req_gradBias:
- # compute grad_bias first before changing grad_output dtype
- grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
-
- # Cast grad_output to fp16
- if len(grad_output.shape) == 3:
- grad_output = grad_output.reshape(
- -1, grad_output.shape[-1]
- ).contiguous()
-
- Cgrad, Cgradt, SCgrad, SCgradt, coo_tensor = F.double_quant(grad_output.to(torch.float16))
-
- if req_gradB:
- # print('back A shape', A.shape)
- # print('grad output t shape', grad_output.t().shape)
- grad_B = torch.matmul(grad_output.t(), A)
-
- if req_gradA:
- if state.CBt is not None:
- C32grad, Sgrad = F.transform(Cgrad, "col32")
- if state.CxBt is None:
- state.CxBt, state.SBt = F.transform(
- state.CBt, to_</s>
|
bitsandbytes.nn.triton_based_modules/SwitchBackLinear.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5b612bc6dfa131fb0cb27dcae5fd863c15694328
|
Added is_available_triton guard to Triton SwitchBackLinear.
|
<1>:<add>
<add> if not is_triton_available:
<add> raise ImportError('''Could not import triton. Please install triton to use SwitchBackLinear.
<add> Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower''')
|
# module: bitsandbytes.nn.triton_based_modules
class SwitchBackLinear(nn.Linear):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
device=None,
dtype=None,
vectorize: bool = False,
mem_efficient : bool = False,
):
<0> super().__init__(in_features, out_features, bias, device, dtype)
<1>
<2> # By default, we use the global quantization.
<3> self.vectorize = vectorize
<4> if self.vectorize:
<5> self._fn = _switchback_vectorrize
<6> if mem_efficient:
<7> print('mem efficient is not supported for vectorize mode.')
<8> exit(1)
<9> else:
<10> if mem_efficient:
<11> self._fn = _switchback_global_mem_efficient
<12> else:
<13> self._fn = _switchback_global
<14>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.triton_based_modules
_switchback_vectorrize(*args, **kwargs)
at: torch.nn.modules.linear
Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None)
at: torch.nn.modules.linear.Linear
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
__init__(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
__init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
|
bitsandbytes.nn.triton_based_modules/SwitchBackLinear.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
9e7cdc9ea95e9756d9f5621a0e2c7e2538363fae
|
Added last SwitchBack refactors. All tests green.
|
<7>:<add> self.vector_wise_quantization = vector_wise_quantization
<add> if self.vector_wise_quantization:
<del> self.vectorize = vectorize
<8>:<del> if self.vectorize:
<11>:<add> print('mem efficient is not supported for vector-wise quantization.')
<del> print('mem efficient is not supported for vectorize mode.')
|
# module: bitsandbytes.nn.triton_based_modules
class SwitchBackLinear(nn.Linear):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
device=None,
dtype=None,
+ vector_wise_quantization: bool = False,
- vectorize: bool = False,
mem_efficient : bool = False,
):
<0> super().__init__(in_features, out_features, bias, device, dtype)
<1>
<2> if not is_triton_available:
<3> raise ImportError('''Could not import triton. Please install triton to use SwitchBackLinear.
<4> Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower''')
<5>
<6> # By default, we use the global quantization.
<7> self.vectorize = vectorize
<8> if self.vectorize:
<9> self._fn = _switchback_vectorrize
<10> if mem_efficient:
<11> print('mem efficient is not supported for vectorize mode.')
<12> exit(1)
<13> else:
<14> if mem_efficient:
<15> self._fn = _switchback_global_mem_efficient
<16> else:
<17> self._fn = _switchback_global
<18>
|
===========unchanged ref 0===========
at: bitsandbytes.triton.triton_utils
is_triton_available()
at: torch.nn.modules.linear.Linear
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
__init__(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
__init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
|
bitsandbytes.nn.triton_based_modules/SwitchBackLinear.prepare_for_eval
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
9e7cdc9ea95e9756d9f5621a0e2c7e2538363fae
|
Added last SwitchBack refactors. All tests green.
|
<8>:<add> if self.vector_wise_quantization:
<del> if self.vectorize:
|
# module: bitsandbytes.nn.triton_based_modules
class SwitchBackLinear(nn.Linear):
def prepare_for_eval(self):
<0> # If we just want to do eval, we can pre-quantize the weights instead of doing it on the forward pass.
<1> # Note this is experimental and not tested thoroughly.
<2> # Note this needs to be explicitly called with something like
<3> # def cond_prepare(m):
<4> # if hasattr(m, "prepare_for_eval"):
<5> # m.prepare_for_eval()
<6> # model.apply(cond_prepare)
<7> print('=> preparing for eval.')
<8> if self.vectorize:
<9> W_int8, state_W = quantize_rowwise(self.weight)
<10> else:
<11> W_int8, state_W = quantize_global(self.weight)
<12>
<13> self.register_buffer("W_int8", W_int8)
<14> self.register_buffer("state_W", state_W)
<15>
<16> del self.weight
<17>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.triton_based_modules.SwitchBackLinear.__init__
self.vector_wise_quantization = vector_wise_quantization
===========changed ref 0===========
# module: bitsandbytes.nn.triton_based_modules
class SwitchBackLinear(nn.Linear):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
device=None,
dtype=None,
+ vector_wise_quantization: bool = False,
- vectorize: bool = False,
mem_efficient : bool = False,
):
super().__init__(in_features, out_features, bias, device, dtype)
if not is_triton_available:
raise ImportError('''Could not import triton. Please install triton to use SwitchBackLinear.
Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower''')
# By default, we use the global quantization.
+ self.vector_wise_quantization = vector_wise_quantization
+ if self.vector_wise_quantization:
- self.vectorize = vectorize
- if self.vectorize:
self._fn = _switchback_vectorrize
if mem_efficient:
+ print('mem efficient is not supported for vector-wise quantization.')
- print('mem efficient is not supported for vectorize mode.')
exit(1)
else:
if mem_efficient:
self._fn = _switchback_global_mem_efficient
else:
self._fn = _switchback_global
|
bitsandbytes.nn.triton_based_modules/SwitchBackLinear.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
9e7cdc9ea95e9756d9f5621a0e2c7e2538363fae
|
Added last SwitchBack refactors. All tests green.
|
<11>:<add> if self.vector_wise_quantization:
<del> if self.vectorize:
|
# module: bitsandbytes.nn.triton_based_modules
class SwitchBackLinear(nn.Linear):
def forward(self, x):
<0> if self.training:
<1> return self._fn.apply(x, self.weight, self.bias)
<2> else:
<3> # If it hasn't been "prepared for eval", run the standard forward pass.
<4> if not hasattr(self, "W_int8"):
<5> return self._fn.apply(x, self.weight, self.bias)
<6>
<7> # Otherwise, use pre-computed weights.
<8> X = x.view(-1, x.size(-1))
<9> X_int8, state_X = quantize_rowwise(X)
<10>
<11> if self.vectorize:
<12> return int8_matmul_rowwise_dequantize(
<13> X_int8, self.W_int8.t(), state_X, self.state_W, self.bias
<14> ).view(*x.size()[:-1], -1)
<15> else:
<16> return int8_matmul_mixed_dequanitze(
<17> X_int8, self.W_int8.t(), state_X, self.state_W, self.bias
<18> ).view(*x.size()[:-1], -1)
<19>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.triton_based_modules.SwitchBackLinear.__init__
self.vector_wise_quantization = vector_wise_quantization
self._fn = _switchback_vectorrize
self._fn = _switchback_global
self._fn = _switchback_global_mem_efficient
===========changed ref 0===========
# module: bitsandbytes.nn.triton_based_modules
class SwitchBackLinear(nn.Linear):
def prepare_for_eval(self):
# If we just want to do eval, we can pre-quantize the weights instead of doing it on the forward pass.
# Note this is experimental and not tested thoroughly.
# Note this needs to be explicitly called with something like
# def cond_prepare(m):
# if hasattr(m, "prepare_for_eval"):
# m.prepare_for_eval()
# model.apply(cond_prepare)
print('=> preparing for eval.')
+ if self.vector_wise_quantization:
- if self.vectorize:
W_int8, state_W = quantize_rowwise(self.weight)
else:
W_int8, state_W = quantize_global(self.weight)
self.register_buffer("W_int8", W_int8)
self.register_buffer("state_W", state_W)
del self.weight
===========changed ref 1===========
# module: bitsandbytes.nn.triton_based_modules
class SwitchBackLinear(nn.Linear):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
device=None,
dtype=None,
+ vector_wise_quantization: bool = False,
- vectorize: bool = False,
mem_efficient : bool = False,
):
super().__init__(in_features, out_features, bias, device, dtype)
if not is_triton_available:
raise ImportError('''Could not import triton. Please install triton to use SwitchBackLinear.
Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower''')
# By default, we use the global quantization.
+ self.vector_wise_quantization = vector_wise_quantization
+ if self.vector_wise_quantization:
- self.vectorize = vectorize
- if self.vectorize:
self._fn = _switchback_vectorrize
if mem_efficient:
+ print('mem efficient is not supported for vector-wise quantization.')
- print('mem efficient is not supported for vectorize mode.')
exit(1)
else:
if mem_efficient:
self._fn = _switchback_global_mem_efficient
else:
self._fn = _switchback_global
|
tests.test_triton/test_switchback
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
9e7cdc9ea95e9756d9f5621a0e2c7e2538363fae
|
Added last SwitchBack refactors. All tests green.
|
<0>:<add> for dim in [83]:
<del> for dim in [83, 17, 128]:
<1>:<add> for batch in [13]:
<del> for batch in [13, 128, 256]:
<4>:<del> print('vectorrize', vectorrize)
<5>:<add> switchback = SwitchBackLinear(dim, 4 * dim, vector_wise_quantization=vector_wise_quantization).cuda().half()
<del> switchback = SwitchBackLinear(dim, 4 * dim, vectorize=vectorrize).cuda().half()
|
<s>pytest.mark.skipif(not torch.cuda.is_available() or not torch.cuda.get_device_capability()[0] >= 8, reason="This test requires a GPU with compute capability 8.0 or higher.")
+ @pytest.mark.parametrize("vector_wise_quantization", [False, True])
- @pytest.mark.parametrize("vectorrize", [False, True])
+ def test_switchback(vector_wise_quantization):
- def test_switchback(vectorrize):
<0> for dim in [83, 17, 128]:
<1> for batch in [13, 128, 256]:
<2>
<3> standard = torch.nn.Linear(dim, 4 * dim).cuda().half()
<4> print('vectorrize', vectorrize)
<5> switchback = SwitchBackLinear(dim, 4 * dim, vectorize=vectorrize).cuda().half()
<6> baseline = Linear8bitLt(dim, 4 * dim).cuda().half()
<7> switchback.weight.data.copy_(standard.weight)
<8> switchback.bias.data.copy_(standard.bias)
<9> baseline.weight.data.copy_(standard.weight)
<10> baseline.bias.data.copy_(standard.bias)
<11>
<12> x1 = torch.randn(batch, dim).cuda().half().requires_grad_(True)
<13> x2 = x1.clone().detach().requires_grad_(True)
<14> x3 = x1.clone().detach().requires_grad_(True)
<15>
<16> out_standard = standard(x1)
<17> (2**10 * out_standard.abs().mean()).backward()
<18>
<19> print(x2.dtype)
<20> out_sb = switchback(x2)
<21> (2**10 * out_sb.abs().mean()).backward()
<22>
<23> out_baseline = baseline(x3)
<24> (2**10 * out_baseline.abs().mean()).backward()
<25>
<26> err_sb = (out_standard - out_sb).abs().mean()
<27> </s>
|
===========below chunk 0===========
<s>skipif(not torch.cuda.is_available() or not torch.cuda.get_device_capability()[0] >= 8, reason="This test requires a GPU with compute capability 8.0 or higher.")
+ @pytest.mark.parametrize("vector_wise_quantization", [False, True])
- @pytest.mark.parametrize("vectorrize", [False, True])
+ def test_switchback(vector_wise_quantization):
- def test_switchback(vectorrize):
# offset: 1
print('OUT', err_sb, err_baseline)
assert err_sb < 2 * err_baseline
err_sb = (standard.bias.grad - switchback.bias.grad).abs().mean()
err_baseline = (standard.bias.grad - baseline.bias.grad).abs().mean()
print('GW2', err_sb, err_baseline)
assert err_sb < 2 * err_baseline
err_sb = (standard.weight.grad - switchback.weight.grad).abs().mean()
err_baseline = (standard.weight.grad - baseline.weight.grad).abs().mean()
print('GW1', err_sb, err_baseline)
assert err_sb < 2 * err_baseline
err_sb = (x1.grad - x2.grad).abs().mean()
err_baseline = (x1.grad - x3.grad).abs().mean()
print('GX1', err_sb, err_baseline)
assert err_sb < 2 * err_baseline
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.nn.modules
Linear8bitLt(input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None)
at: bitsandbytes.nn.modules.Int8Params.cuda
self.data = CB
at: bitsandbytes.nn.modules.Linear8bitLt.__init__
self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
at: bitsandbytes.nn.triton_based_modules
SwitchBackLinear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None, vector_wise_quantization: bool=False, mem_efficient: bool=False)
at: bitsandbytes.triton.triton_utils
is_triton_available()
===========unchanged ref 1===========
at: torch._C._VariableFunctions
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional</s>
===========unchanged ref 2===========
at: torch._tensor.Tensor.__setstate__
self.data = state[0]
at: torch.cuda
is_available() -> bool
get_device_capability(device: Optional[_device_t]=None) -> Tuple[int, int]
at: torch.nn.modules.linear
Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None)
at: torch.nn.modules.linear.Linear.__init__
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
at: torch.nn.modules.module.Module
dump_patches: bool = False
_version: int = 1
training: bool
_parameters: Dict[str, Optional[Parameter]]
_buffers: Dict[str, Optional[Tensor]]
_non_persistent_buffers_set: Set[str]
_backward_pre_hooks: Dict[int, Callable]
_backward_hooks: Dict[int, Callable]
_is_full_backward_hook: Optional[bool]
_forward_hooks: Dict[int, Callable]
_forward_hooks_with_kwargs: Dict[int, bool]
_forward_hooks_always_called: Dict[int, bool]
_forward_pre_hooks: Dict[int, Callable]
_forward_pre_hooks_with_kwargs: Dict[int, bool]
_state_dict_hooks: Dict[int, Callable]
_load_state_dict_pre_hooks: Dict[int, Callable]
_state_dict_pre_hooks: Dict[int, Callable]
_load_state_dict_post_hooks: Dict[int, Callable]
_modules: Dict[str, Optional['Module']]
call_super_init: bool = False
_compiled_call_impl : Optional[Callable] = None
===========unchanged ref 3===========
forward: Callable[..., Any] = _forward_unimplemented
cuda(device: Optional[Union[int, device]]=None) -> T
half() -> T
__call__ : Callable[..., Any] = _wrapped_call_impl
T_destination = TypeVar('T_destination', bound=Dict[str, Any])
===========changed ref 0===========
# module: bitsandbytes.nn.triton_based_modules
+ SwitchBackLinearGlobal = partial(SwitchBackLinear, vector_wise_quantization=False)
- SwitchBackLinearGlobal = partial(SwitchBackLinear, vectorize=False)
+ SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vector_wise_quantization=False, mem_efficient=True)
- SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vectorize=False, mem_efficient=True)
+ SwitchBackLinearVectorwise = partial(SwitchBackLinear, vector_wise_quantization=True)
- SwitchBackLinearVectorized = partial(SwitchBackLinear, vectorize=True)
|
bitsandbytes.cuda_setup.main/CUDASetup.generate_instructions
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<add> if getattr(self, 'error', False): return
<add> print(self.error)
<add> self.error = True
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
<0> if self.cuda is None:
<1> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
<2> self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
<3> self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
<4> self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
<5> self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
<6> self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
<7> return
<8>
<9> if self.cudart_path is None:
<10> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
<11> self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
<12> self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
<13> self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
<14> self.add_log_entry('CUDA</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected</s>
===========below chunk 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s>:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone [email protected]:TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
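The added guard makes generate_instructions() effectively run-once: the first call sets self.error, and later calls return before re-emitting the long instruction block. A minimal sketch of that run-once pattern under the same flag name (the standalone class and message here are placeholders, not the bitsandbytes API):

import builtins

class OneShotReporter:
    def __init__(self):
        self.error = False  # mirrors the flag added in CUDASetup.initialize()

    def generate_instructions(self):
        if getattr(self, 'error', False):
            return  # instructions were already printed once
        self.error = True
        builtins.print('...diagnostic instructions would go here...')

reporter = OneShotReporter()
reporter.generate_instructions()  # prints once
reporter.generate_instructions()  # silent on repeat calls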
|
bitsandbytes.cuda_setup.main/CUDASetup.initialize
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<4>:<add> self.error = False
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
<0> if not getattr(self, 'initialized', False):
<1> self.has_printed = False
<2> self.lib = None
<3> self.initialized = False
<4>
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
+ if getattr(self, 'error', False): return
+ print(self.error)
+ self.error = True
if self.cuda is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM</s>
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
<s> it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
</s>
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s>add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone [email protected]:TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
|
bitsandbytes.optim.adamw/AdamW.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<del> super().__init__(
<1>:<del> "adam",
<2>:<del> params,
<3>:<del> lr,
<4>:<del> betas,
<5>:<del> eps,
<6>:<del> weight_decay,
<7>:<del> optim_bits,
<8>:<del> args,
<9>:<del> min_8bit_size,
<10>:<del> percentile_clipping,
<11>:<del> block_wise,
<12>:<del> )
<13>:<add> super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged )
|
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
<0> super().__init__(
<1> "adam",
<2> params,
<3> lr,
<4> betas,
<5> eps,
<6> weight_decay,
<7> optim_bits,
<8> args,
<9> min_8bit_size,
<10> percentile_clipping,
<11> block_wise,
<12> )
<13>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer
Optimizer2State(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, is_paged=False)
at: bitsandbytes.optim.optimizer.Optimizer2State
__init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, is_paged=False)
__init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, is_paged=False)
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
+ if getattr(self, 'error', False): return
+ print(self.error)
+ self.error = True
if self.cuda is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM</s>
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
<s> it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
</s>
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s>add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone [email protected]:TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
|
bitsandbytes.optim.adamw/AdamW8bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<del> super().__init__(
<1>:<del> "adam",
<2>:<del> params,
<3>:<del> lr,
<4>:<del> betas,
<5>:<del> eps,
<6>:<del> weight_decay,
<7>:<del> 8,
<8>:<del> args,
<9>:<del> min_8bit_size,
<10>:<del> percentile_clipping,
<11>:<del> block_wise,
<12>:<del> )
<13>:<add> super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged )
|
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
<0> super().__init__(
<1> "adam",
<2> params,
<3> lr,
<4> betas,
<5> eps,
<6> weight_decay,
<7> 8,
<8> args,
<9> min_8bit_size,
<10> percentile_clipping,
<11> block_wise,
<12> )
<13>
|
===========changed ref 0===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 1===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 2===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 3===========
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- optim_bits,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged )
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
+ if getattr(self, 'error', False): return
+ print(self.error)
+ self.error = True
if self.cuda is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM</s>
===========changed ref 6===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
<s> it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
</s>
|
bitsandbytes.optim.adamw/AdamW32bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<del> super().__init__(
<1>:<del> "adam",
<2>:<del> params,
<3>:<del> lr,
<4>:<del> betas,
<5>:<del> eps,
<6>:<del> weight_decay,
<7>:<del> 32,
<8>:<del> args,
<9>:<del> min_8bit_size,
<10>:<del> percentile_clipping,
<11>:<del> block_wise,
<12>:<del> )
<13>:<add> super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
<0> super().__init__(
<1> "adam",
<2> params,
<3> lr,
<4> betas,
<5> eps,
<6> weight_decay,
<7> 32,
<8> args,
<9> min_8bit_size,
<10> percentile_clipping,
<11> block_wise,
<12> )
<13>
|
===========changed ref 0===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 1===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 2===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 3===========
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- 8,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged )
===========changed ref 4===========
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- optim_bits,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged )
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 6===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
+ if getattr(self, 'error', False): return
+ print(self.error)
+ self.error = True
if self.cuda is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM</s>
|
bitsandbytes.optim.adam/Adam.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<del> super().__init__(
<1>:<del> "adam",
<2>:<del> params,
<3>:<del> lr,
<4>:<del> betas,
<5>:<del> eps,
<6>:<del> weight_decay,
<7>:<del> optim_bits,
<8>:<del> args,
<9>:<del> min_8bit_size,
<10>:<del> percentile_clipping,
<11>:<del> block_wise,
<12>:<del> )
<13>:<add> super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
<s>8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
<0> super().__init__(
<1> "adam",
<2> params,
<3> lr,
<4> betas,
<5> eps,
<6> weight_decay,
<7> optim_bits,
<8> args,
<9> min_8bit_size,
<10> percentile_clipping,
<11> block_wise,
<12> )
<13>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer
Optimizer2State(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, is_paged=False)
at: bitsandbytes.optim.optimizer.Optimizer2State
__init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, is_paged=False)
__init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, is_paged=False)
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 1===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 2===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 3===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 4===========
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- 32,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 5===========
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- 8,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged )
===========changed ref 6===========
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- optim_bits,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged )
|
bitsandbytes.optim.adam/Adam8bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<del> super().__init__(
<1>:<del> "adam",
<2>:<del> params,
<3>:<del> lr,
<4>:<del> betas,
<5>:<del> eps,
<6>:<del> weight_decay,
<7>:<del> 8,
<8>:<del> args,
<9>:<del> min_8bit_size,
<10>:<del> percentile_clipping,
<11>:<del> block_wise,
<12>:<del> )
<13>:<add> super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
<s>8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
<0> super().__init__(
<1> "adam",
<2> params,
<3> lr,
<4> betas,
<5> eps,
<6> weight_decay,
<7> 8,
<8> args,
<9> min_8bit_size,
<10> percentile_clipping,
<11> block_wise,
<12> )
<13>
|
===========changed ref 0===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 1===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 2===========
# module: bitsandbytes.optim.adam
+ class PagedAdam(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 3===========
<s>8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- optim_bits,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 5===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 6===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 7===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 8===========
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- 32,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 9===========
<s>size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- 8,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged )
|
bitsandbytes.optim.adam/Adam32bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<del> super().__init__(
<1>:<del> "adam",
<2>:<del> params,
<3>:<del> lr,
<4>:<del> betas,
<5>:<del> eps,
<6>:<del> weight_decay,
<7>:<del> 32,
<8>:<del> args,
<9>:<del> min_8bit_size,
<10>:<del> percentile_clipping,
<11>:<del> block_wise,
<12>:<del> )
<13>:<add> super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
<s>8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
<0> super().__init__(
<1> "adam",
<2> params,
<3> lr,
<4> betas,
<5> eps,
<6> weight_decay,
<7> 32,
<8> args,
<9> min_8bit_size,
<10> percentile_clipping,
<11> block_wise,
<12> )
<13>
|
===========unchanged ref 0===========
at: torch.optim.optimizer.Optimizer
OptimizerPreHook: TypeAlias = Callable[[Self, Args, Kwargs], Optional[Tuple[Args, Kwargs]]] # type: ignore[misc]
OptimizerPostHook: TypeAlias = Callable[[Self, Args, Kwargs], None] # type: ignore[misc]
_optimizer_step_pre_hooks: Dict[int, OptimizerPreHook]
_optimizer_step_post_hooks: Dict[int, OptimizerPostHook]
_optimizer_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]'
_optimizer_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]'
_optimizer_load_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]'
_optimizer_load_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]'
__init__(params: params_t, defaults: Dict[str, Any]) -> None
===========changed ref 0===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 1===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 2===========
# module: bitsandbytes.optim.adam
+ class PagedAdam(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 3===========
<s>8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- 8,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 4===========
<s>8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- optim_bits,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 6===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 7===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 8===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
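The Paged* classes introduced in these records differ from the existing optimizers only by forwarding is_paged=True into Optimizer2State, so their optimizer state can live in paged memory. A hedged usage sketch (assumes PagedAdamW8bit is re-exported under bitsandbytes.optim; otherwise import it from bitsandbytes.optim.adamw, and a CUDA device is required):

import torch
import bitsandbytes as bnb

model = torch.nn.Linear(1024, 1024).cuda()
opt = bnb.optim.PagedAdamW8bit(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-2)

loss = model(torch.randn(8, 1024, device='cuda')).sum()
loss.backward()
opt.step()       # 8-bit optimizer state, allocated as paged memory
opt.zero_grad()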
|
bitsandbytes.autograd._functions/MatMul8bitLt.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
<0> using_igemmlt = supports_igemmlt(A.device) and not state.force_no_igemmlt
<1> # default of pytorch behavior if inputs are empty
<2> ctx.is_empty = False
<3> if prod(A.shape) == 0:
<4> ctx.is_empty = True
<5> ctx.A = A
<6> ctx.B = B
<7> ctx.bias = bias
<8> if A.shape[-1] == B.shape[0]:
<9> return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)
<10> else:
<11> return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)
<12>
<13> # 1. Quantize A
<14> # 2. Quantize B
<15> # 3. Matmul
<16> # 4. Mixed-precision decomposition matmul
<17> # 5. Save state
<18> formatB = state.formatB
<19> input_shape = A.shape
<20> if state.outlier_pool is None:
<21> state.outlier_pool = GlobalOutlierPooler.get_instance()
<22>
<23> # Cast A to fp16
<24> if A.dtype != torch.float16:
<25> warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")
<26>
<27> # 1. Quantize A
<28> if len(A.shape) == 3:
<29> A = A.view(-1, A.shape[-1]).contiguous()
<30> CA,</s>
|
===========below chunk 0===========
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
# offset: 1
if state.threshold > 0.0 and coo_tensorA is not None:
if state.has_fp16_weights:
idx = torch.unique(coo_tensorA.colidx).long()
CA[:, idx] = 0
CAt[:, idx] = 0
subA = A[:, idx]
state.subB = B[:, idx].t().contiguous()
state.idx = idx
else:
if state.CxB is None and using_igemmlt:
# B is in 8-bit row-major, we can transform it back to 16-bit to extract outlier dimensions
# we also need to convert it to the turing/ampere format
state.CxB, state.SB = F.transform(state.CB, to_order=formatB)
else:
if not state.has_fp16_weights and state.CxB is None and using_igemmlt:
state.CxB, state.SB = F.transform(state.CB, to_order=formatB)
subA = None
# 2. Quantize B
if state.has_fp16_weights:
has_grad = True if (getattr(B, "grad", None) is not None) else False
is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)
if is_transposed:
B = B.contiguous()
if (state.is_training and not has_grad) or state.CxB is None:
state.reset_grads()
(
CB,
state.CBt,
</s>
===========below chunk 1===========
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
# offset: 2
<s>.CxB is None:
state.reset_grads()
(
CB,
state.CBt,
state.SCB,
state.SCBt,
coo_tensorB,
) = F.double_quant(B.to(torch.float16))
if using_igemmlt:
state.CxB, state.SB = F.transform(CB, to_order=formatB)
else:
state.CB = CB
else:
has_grad = False
if coo_tensorA is not None and not state.has_fp16_weights:
# extract outliers
outlier_idx = torch.unique(coo_tensorA.colidx)
state.idx = outlier_idx
# state.outlier_pool.add_outliers(outlier_idx, A.shape[-1])
# if state.use_pool and state.outlier_pool.model_dim == A.shape[-1]:
# # do not use pool for 2nd FFN layer
# state.idx = state.outlier_pool.get_current_outlier_idx().to(A.device)
# else:
# state.idx = outlier_idx
if state.CxB is not None:
outliers = F.extract_outliers(state.CxB, state.SB, state.idx.int())
else:
outliers = state.CB[:, state.idx.long()].clone()
</s>
===========below chunk 2===========
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
# offset: 3
<s>subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype)
CA[:, state.idx.long()] = 0
CAt[:, state.idx.long()] = 0
subA = A[:, state.idx.long()]
shapeB = state.SB[0] if state.SB else B.shape
if len(input_shape) == 3:
output_shape = (input_shape[0], input_shape[1], shapeB[0])
else:
output_shape = (input_shape[0], shapeB[0])
# 3. Matmul
if using_igemmlt:
C32A, SA = F.transform(CA, "col32")
out32, Sout32 = F.igemmlt(C32A, state.CxB, SA, state.SB)
if bias is None or bias.dtype == torch.float16:
# we apply the fused bias here
output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=bias)
output = output.to(A.dtype)
else: # apply bias separately
output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=None)
output = output.to(A.dtype).add_(bias)
else:
A_wo_outliers = A.clone</s>
===========below chunk 3===========
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
# offset: 4
<s> if state.idx is not None:
A_wo_outliers[:, state.idx.long()] = 0
output = torch.nn.functional.linear(A_wo_outliers, state.CB.to(A.dtype))
output = output.mul_(state.SCB.unsqueeze(0).mul(1.0 / 127.0))
if bias is not None:
output = output.add_(bias)
# 4. Mixed-precision decomposition matmul
if coo_tensorA is not None and subA is not None:
output += torch.matmul(subA, state.subB)
# 5. Save state
ctx.state = state
ctx.formatB = formatB
ctx.grad_shape = input_shape
ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
if any(ctx.needs_input_grad[:2]):
ctx.tensors = (CAt, subA)
ctx.tensor_states = (SCAt, state.idx)
else:
ctx.tensors = [None, None]
ctx.tensor_states = (None, None)
ctx.save_for_backward(None, None)
clone_func = torch.clone if len(output_shape) == 3 else lambda x: x
return clone_func(output.view(output_shape))
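The numbered steps in this forward pass (quantize A, quantize B, int8 matmul, mixed-precision decomposition for outlier columns) can be summarized with a small reference sketch. This is only the reference math with made-up helper names and a fixed threshold, not the library's CUDA kernel path or its col32/Turing layouts:

import torch

def int8_matmul_with_decomposition(A, B, threshold=6.0):
    # out = A @ B.t(); columns of A holding outliers stay in high precision
    outlier_idx = (A.abs() > threshold).any(dim=0).nonzero().flatten()
    A_out, B_out = A[:, outlier_idx], B[:, outlier_idx]
    A_rest = A.clone()
    A_rest[:, outlier_idx] = 0
    # symmetric absmax quantization: per row of A, per output row of B
    sa = A_rest.abs().amax(dim=1, keepdim=True).clamp(min=1e-8)
    sb = B.abs().amax(dim=1, keepdim=True).clamp(min=1e-8)
    qa = torch.round(A_rest / sa * 127).to(torch.int8)
    qb = torch.round(B / sb * 127).to(torch.int8)
    int8_part = (qa.float() @ qb.float().t()) * (sa / 127) * (sb.t() / 127)
    fp_part = A_out @ B_out.t()      # the mixed-precision decomposition term
    return (int8_part + fp_part).to(A.dtype)

A = torch.randn(4, 64) * 0.1
A[:, 3] = 20.0                       # plant one outlier column
B = torch.randn(16, 64) * 0.1
print((int8_matmul_with_decomposition(A, B) - A @ B.t()).abs().max())  # small quantization error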
|
|
bitsandbytes.autograd._functions/MatMul8bitLt.backward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<4>:<add> CAt, subA, A = ctx.tensors
<del> CAt, subA = ctx.tensors
<20>:<add> #grad_B = torch.matmul(grad_output.t(), A)
|
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
<0> if ctx.is_empty:
<1> bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
<2> return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
<3> req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad
<4> CAt, subA = ctx.tensors
<5> SCAt, idx = ctx.tensor_states
<6> formatB = ctx.formatB
<7> state = ctx.state
<8> grad_A = grad_B = grad_bias = None
<9>
<10> if req_gradBias:
<11> # compute grad_bias first before changing grad_output dtype
<12> grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
<13>
<14> # Cast grad_output to fp16
<15> if len(grad_output.shape) == 3:
<16> grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()
<17>
<18> Cgrad, Cgradt, SCgrad, SCgradt, coo_tensor = F.double_quant(grad_output.to(torch.float16))
<19> if req_gradB:
<20> CxAt, SAt = F.transform(CAt, formatB, transpose=True)
<21> C32grad, Sgrad = F.transform(Cgradt, "col32", transpose=True)
<22> gradB32, SgradB32 = F.igemmlt(C32grad, CxAt, Sgrad, SAt)
<23> grad_B = F.mm_dequant(gradB32, SgradB32, SCgradt, SCAt)
<24> if state.threshold > 0.0 and subA is not None:
<25> grad_B[:, idx] += torch.matmul(grad_output.t(), subA)</s>
|
===========below chunk 0===========
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
# offset: 1
if req_gradA:
if state.CBt is not None:
C32grad, Sgrad = F.transform(Cgrad, "col32")
if state.CxBt is None:
state.CxBt, state.SBt = F.transform(state.CBt, to_order=formatB, transpose=True)
gradA32, SgradA32 = F.igemmlt(C32grad, state.CxBt, Sgrad, state.SBt)
grad_A = F.mm_dequant(gradA32, SgradA32, SCgrad, state.SCBt).view(ctx.grad_shape).to(ctx.dtype_A)
elif state.CB is not None:
CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))
grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)
elif state.CxB is not None:
CB = (
undo_layout(state.CxB, state.tile_indices)
.to(ctx.dtype_A)
.mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))
)
grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)
else:
raise Exception("State must contain either CBt or CB or CxB matrix for backward")
return grad_A, grad_B, None, grad_bias, None
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 1===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 2===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 3===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 4===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 5===========
# module: bitsandbytes.optim.adam
+ class PagedAdam(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 6===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 7===========
<s>8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- 32,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
bitsandbytes.nn.modules/OutlierAwareLinear.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<13>:<del> return self.forward_with_outliers(x, self.outlier_dim)
<14>:<del>
|
# module: bitsandbytes.nn.modules
class OutlierAwareLinear(nn.Linear):
def forward(self, x):
<0> if self.outlier_dim is None:
<1> tracer = OutlierTracer.get_instance()
<2> if not tracer.is_initialized():
<3> print('Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer')
<4> outlier_idx = tracer.get_outliers(self.weight)
<5> #print(outlier_idx, tracer.get_hvalue(self.weight))
<6> self.outlier_dim = outlier_idx
<7>
<8> if not self.is_quantized:
<9> w = self.quantize_weight(self.weight, self.outlier_dim)
<10> self.weight.data.copy_(w)
<11> self.is_quantized = True
<12>
<13> return self.forward_with_outliers(x, self.outlier_dim)
<14>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.modules
T = TypeVar("T", bound="torch.nn.Module")
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
dtype()
===========changed ref 0===========
# module: bitsandbytes.nn.modules
+ class Params4bit(torch.nn.Parameter):
+ def __new__(cls, data=None, requires_grad=True, quant_state=None, blocksize=64, compress_statistics=True, quant_type='fp4'):
+ if data is None:
+ data = torch.empty(0)
+
+ self = torch.Tensor._make_subclass(cls, data, requires_grad)
+ self.blocksize = blocksize
+ self.compress_statistics = compress_statistics
+ self.quant_type = quant_type
+ self.quant_state = quant_state
+ self.data = data
+ return self
+
===========changed ref 1===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 3===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 4===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 5===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 6===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 7===========
# module: bitsandbytes.optim.adam
+ class PagedAdam(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 8===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 9===========
<s>8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- 32,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 10===========
<s>8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "adam",
- params,
- lr,
- betas,
- eps,
- weight_decay,
- 8,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
bitsandbytes.functional/is_on_gpu
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<1>:<add> gpu_ids = set()
<3>:<add> is_paged = getattr(t, 'is_paged', False)
<add> on_gpu &= (t.device.type == 'cuda' or is_paged)
<del> on_gpu &= t.device.type == 'cuda'
<4>:<add> if not is_paged:
<add> gpu_ids.add(t.device.index)
<add> if not on_gpu:
<add> raise TypeError(f'All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:\n {[(t.shape, t.device) for t in tensors]}')
<add> if len(gpu_ids) > 1:
<add> raise TypeError(f'Input tensors need to be on the same GPU, but found the following tensor and device combinations:\n {[(t.shape, t.device) for t in tensors]}')
|
# module: bitsandbytes.functional
def is_on_gpu(tensors):
<0> on_gpu = True
<1> for t in tensors:
<2> if t is None: continue # NULL pointers are fine
<3> on_gpu &= t.device.type == 'cuda'
<4> return on_gpu
<5>
|
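Per the added lines in the hints above, is_on_gpu no longer just returns a flag: tensors that are not on a CUDA device (and not paged) now raise a TypeError, and so do inputs spread across different GPUs, while paged tensors (is_paged=True) are exempt from the device-index check. A hedged usage sketch of that behaviour (requires a CUDA build; the failing case only triggers with two visible devices):

import torch
from bitsandbytes.functional import is_on_gpu

a = torch.randn(4, 4, device='cuda:0')
b = torch.randn(4, 4, device='cuda:0')
assert is_on_gpu([a, b, None])        # None entries are skipped like NULL pointers

if torch.cuda.device_count() > 1:
    c = torch.randn(4, 4, device='cuda:1')
    try:
        is_on_gpu([a, c])             # mixed devices now raise instead of passing silently
    except TypeError as e:
        print('caught:', e)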
===========changed ref 0===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def prefetch_tensor(A, to_cpu=False):
+ assert A.is_paged, 'Only paged tensors can be prefetched!'
+ if to_cpu:
+ deviceid = -1
+ else:
+ deviceid = A.page_deviceid
+
+ num_bytes = dtype2bytes[A.dtype]*A.numel()
+ lib.cprefetch(get_ptr(A), ct.c_size_t(num_bytes), ct.c_int32(deviceid))
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def prefetch_all(self, to_cpu=False):
+ # assume the first added, will be hte
+ # ones that are used first, so swap them in last
+ # in the case they are evicted again
+ for t in self.paged_tensors[::-1]:
+ prefetch_tensor(t, to_cpu)
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def get_paged(*shape, dtype=torch.float32, device=torch.device('cuda', index=0)):
+ num_bytes = dtype2bytes[dtype]*prod(shape)
+ cuda_ptr = lib.cget_managed_ptr(ct.c_size_t(num_bytes))
+ c_ptr = ct.cast(cuda_ptr, ct.POINTER(ct.c_int))
+ new_array = np.ctypeslib.as_array(c_ptr, shape=shape)
+ out = torch.frombuffer(new_array, dtype=dtype, count=prod(shape)).view(shape)
+ out.is_paged = True
+ out.page_deviceid = device.index
+ return out
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def elementwise_func(func_name, A, B, value, prefetch=True):
+ func = None
+ if A.dtype == torch.float32:
+ func = getattr(lib, f'c{func_name}_fp32', None)
+ cvalue = ct.c_float(value)
+ elif A.dtype == torch.uint8:
+ func = getattr(lib, f'c{func_name}_uint8', None)
+ cvalue = ct.c_uint8(value)
+
+ if func is None: raise NotImplementedError(f'Function not implemented: {func_name}')
+
+ is_managed = getattr(A, 'is_managed', False)
+ if is_managed and prefetch:
+ prefetch_tensor(A)
+ if B is not None: prefetch_tensor(B)
+
+ func(get_ptr(A), get_ptr(B), cvalue, ct.c_int64(A.numel()))
+ if A.is_paged or B.is_paged:
+ # paged function are fully asynchronous
+ # if we return from this function, we want to the tensor
+ # to be in the correct state, that is the final state after the
+ # operation occured. So we synchronize.
+ torch.cuda.synchronize()
+
===========changed ref 11===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 12===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 13===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 14===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 15===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 16===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 17===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
|
bitsandbytes.functional/quantize_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<15>:<del> rand : torch.Tensor
<16>:<del> The tensor for stochastic rounding.
|
# module: bitsandbytes.functional
+ def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
- def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor:
<0> """
<1> Quantize tensor A in blocks of size 4096 values.
<2>
<3> Quantizes tensor A by dividing it into blocks of 4096 values.
<4> Then the absolute maximum value within these blocks is calculated
<5> for the non-linear quantization.
<6>
<7> Parameters
<8> ----------
<9> A : torch.Tensor
<10> The input tensor.
<11> code : torch.Tensor
<12> The quantization map.
<13> absmax : torch.Tensor
<14> The absmax values.
<15> rand : torch.Tensor
<16> The tensor for stochastic rounding.
<17> out : torch.Tensor
<18> The output tensor (8-bit).
<19>
<20> Returns
<21> -------
<22> torch.Tensor:
<23> The 8-bit tensor.
<24> tuple(torch.Tensor, torch.Tensor):
<25> The quantization state to undo the quantization.
<26> """
<27>
<28>
<29> if code is None:
<30> if "dynamic" not in name2qmap:
<31> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<32> code = name2qmap["dynamic"]
<33>
<34> if absmax is None:
<35> n = A.numel()
<36> blocks = n // blocksize
<37> blocks += 1 if n % blocksize > 0 else 0
<38> absmax = torch.zeros((blocks,), device=A.device)
<39>
<40> if out is None:
<41> out = torch.zeros_like(A, dtype=torch.uint8)
<42>
<43> if A.device.type != 'cpu':
<44> assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
<45> cblock</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
+ def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
- def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor:
# offset: 1
prev_device = pre_call(A.device)
code = code.to(A.device)
if rand is not None:
is_on_gpu([code, A, out, absmax, rand])
assert rand.numel() >= 1024
rand_offset = random.randint(0, 1023)
if A.dtype == torch.float32:
lib.cquantize_blockwise_stochastic_fp32(get_ptr(code), get_ptr(A),get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_stochastic_fp16(get_ptr(code), get_ptr(A),get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
else:
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_</s>
===========below chunk 1===========
# module: bitsandbytes.functional
+ def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
- def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor:
# offset: 2
<s>.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
assert rand is None
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out, (absmax, code)
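The docstring above describes the scheme: split A into blocks, record each block's absolute maximum, normalize the block by it, and map the normalized values onto the 256-entry code. Below is a hedged pure-PyTorch reference of that idea with a toy blocksize; it does a brute-force nearest-neighbour lookup into the code rather than calling the CUDA kernel, so it is for orientation only.

import torch

def blockwise_quant_reference(A, code, blocksize=64):
    flat = A.float().flatten()
    n = flat.numel()
    pad = (-n) % blocksize
    flat = torch.cat([flat, flat.new_zeros(pad)]).view(-1, blocksize)
    absmax = flat.abs().max(dim=1).values.clamp(min=1e-8)    # one scale per block
    normed = flat / absmax[:, None]                          # now in [-1, 1]
    # nearest code entry for every value (brute force, fine for toy sizes)
    idx = (normed[..., None] - code.view(1, 1, -1)).abs().argmin(dim=-1)
    return idx.to(torch.uint8).flatten()[:n].view_as(A), absmax

code = torch.linspace(-1, 1, 256)        # stand-in for the dynamic map
A = torch.randn(4, 32)
q, absmax = blockwise_quant_reference(A, code)
print(q.dtype, q.shape, absmax.shape)    # torch.uint8 torch.Size([4, 32]) torch.Size([2])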
===========changed ref 0===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def prefetch_tensor(A, to_cpu=False):
+ assert A.is_paged, 'Only paged tensors can be prefetched!'
+ if to_cpu:
+ deviceid = -1
+ else:
+ deviceid = A.page_deviceid
+
+ num_bytes = dtype2bytes[A.dtype]*A.numel()
+ lib.cprefetch(get_ptr(A), ct.c_size_t(num_bytes), ct.c_int32(deviceid))
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 8===========
# module: bitsandbytes.functional
def is_on_gpu(tensors):
on_gpu = True
+ gpu_ids = set()
for t in tensors:
if t is None: continue # NULL pointers are fine
+ is_paged = getattr(t, 'is_paged', False)
+ on_gpu &= (t.device.type == 'cuda' or is_paged)
- on_gpu &= t.device.type == 'cuda'
+ if not is_paged:
+ gpu_ids.add(t.device.index)
+ if not on_gpu:
+ raise TypeError(f'All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:\n {[(t.shape, t.device) for t in tensors]}')
+ if len(gpu_ids) > 1:
+ raise TypeError(f'Input tensors need to be on the same GPU, but found the following tensor and device combinations:\n {[(t.shape, t.device) for t in tensors]}')
return on_gpu
===========changed ref 9===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def prefetch_all(self, to_cpu=False):
+ # assume the first added, will be hte
+ # ones that are used first, so swap them in last
+ # in the case they are evicted again
+ for t in self.paged_tensors[::-1]:
+ prefetch_tensor(t, to_cpu)
+
|
bitsandbytes.functional/dequantize_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<33>:<add>
<34>:<add> quant_state = (absmax, code, blocksize)
<del> quant_state = (absmax, code)
<35>:<add> assert absmax is not None and out is not None
<36>:<add> absmax, code, blocksize, nested, offset, state2 = quant_state
<add> if nested:
<add> absmax = dequantize_blockwise(absmax, state2)
<add> absmax += offset
<del> absmax, code = quant_state
|
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
+ nested=False
) -> Tensor:
<0> """
<1> Dequantizes blockwise quantized values.
<2>
<3> Dequantizes the tensor A with maximum absolute values absmax in
<4> blocks of size 4096.
<5>
<6> Parameters
<7> ----------
<8> A : torch.Tensor
<9> The input 8-bit tensor.
<10> quant_state : tuple(torch.Tensor, torch.Tensor)
<11> Tuple of code and absmax values.
<12> absmax : torch.Tensor
<13> The absmax values.
<14> code : torch.Tensor
<15> The quantization map.
<16> out : torch.Tensor
<17> Dequantized output tensor (default: float32)
<18>
<19>
<20> Returns
<21> -------
<22> torch.Tensor:
<23> Dequantized tensor (default: float32)
<24> """
<25> assert quant_state is not None or absmax is not None
<26> if code is None and quant_state is None:
<27> if "dynamic" not in name2qmap:
<28> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<29> code = name2qmap["dynamic"]
<30>
<31> if out is None:
<32> out = torch.zeros_like(A, dtype=torch.float32)
<33> if quant_state is None:
<34> quant_state = (absmax, code)
<35> else:
<36> absmax, code = quant_state
<37>
<38>
<39> if A.device.type != 'cpu':
<40> device = pre_call(A.device)
<41> code = code.to(A.device)
<42> if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
<43> raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values:</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
+ nested=False
) -> Tensor:
# offset: 1
is_on_gpu([A, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
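Dequantization is the mirror image: each byte is looked up in the code and rescaled by its block's absmax. A hedged pure-PyTorch counterpart to the quantization sketch above (toy blocksize, not the CUDA path):

import torch

def blockwise_dequant_reference(q, code, absmax, blocksize=64):
    flat = code[q.long().flatten()]                  # byte -> normalized value in [-1, 1]
    pad = (-flat.numel()) % blocksize
    flat = torch.cat([flat, flat.new_zeros(pad)]).view(-1, blocksize)
    out = flat * absmax[:, None]                     # undo the per-block scale
    return out.flatten()[:q.numel()].view_as(q)

code = torch.linspace(-1, 1, 256)
absmax = torch.tensor([2.0, 0.5])
q = torch.randint(0, 256, (4, 32), dtype=torch.uint8)
print(blockwise_dequant_reference(q, code, absmax).shape)   # torch.Size([4, 32])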
===========changed ref 0===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def prefetch_tensor(A, to_cpu=False):
+ assert A.is_paged, 'Only paged tensors can be prefetched!'
+ if to_cpu:
+ deviceid = -1
+ else:
+ deviceid = A.page_deviceid
+
+ num_bytes = dtype2bytes[A.dtype]*A.numel()
+ lib.cprefetch(get_ptr(A), ct.c_size_t(num_bytes), ct.c_int32(deviceid))
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 8===========
# module: bitsandbytes.functional
def is_on_gpu(tensors):
on_gpu = True
+ gpu_ids = set()
for t in tensors:
if t is None: continue # NULL pointers are fine
+ is_paged = getattr(t, 'is_paged', False)
+ on_gpu &= (t.device.type == 'cuda' or is_paged)
- on_gpu &= t.device.type == 'cuda'
+ if not is_paged:
+ gpu_ids.add(t.device.index)
+ if not on_gpu:
+ raise TypeError(f'All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:\n {[(t.shape, t.device) for t in tensors]}')
+ if len(gpu_ids) > 1:
+ raise TypeError(f'Input tensors need to be on the same GPU, but found the following tensor and device combinations:\n {[(t.shape, t.device) for t in tensors]}')
return on_gpu
===========changed ref 9===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def prefetch_all(self, to_cpu=False):
+ # assume the first added, will be hte
+ # ones that are used first, so swap them in last
+ # in the case they are evicted again
+ for t in self.paged_tensors[::-1]:
+ prefetch_tensor(t, to_cpu)
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def create_normal_map(offset=0.9677083, use_extra_value=True):
+ if use_extra_value:
+ # one more positive value, this is an asymmetric type
+ v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
+ v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
+ v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
+ v = v1 + v2 + v3
+ else:
+ v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
+ v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
+ v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
+ v = v1 + v2 + v3
+
+ values = torch.Tensor(v)
+ values = values.sort().values
+ values /= values.max()
+ assert values.numel() == 256
+ return values
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def get_paged(*shape, dtype=torch.float32, device=torch.device('cuda', index=0)):
+ num_bytes = dtype2bytes[dtype]*prod(shape)
+ cuda_ptr = lib.cget_managed_ptr(ct.c_size_t(num_bytes))
+ c_ptr = ct.cast(cuda_ptr, ct.POINTER(ct.c_int))
+ new_array = np.ctypeslib.as_array(c_ptr, shape=shape)
+ out = torch.frombuffer(new_array, dtype=dtype, count=prod(shape)).view(shape)
+ out.is_paged = True
+ out.page_deviceid = device.index
+ return out
+
|
bitsandbytes.functional/optimizer_update_32bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<43>:<del> if optimizer_name not in str2optimizer32bit:
<44>:<del> raise NotImplementedError(
<45>:<del> f
|
<s>
g: Tensor,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
<0> """
<1> Performs an inplace optimizer update with one or two optimizer states.
<2>
<3> Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
<4>
<5> Parameters
<6> ----------
<7> optimizer_name : str
<8> The name of the optimizer: {adam}.
<9> g : torch.Tensor
<10> Gradient tensor.
<11> p : torch.Tensor
<12> Parameter tensor.
<13> state1 : torch.Tensor
<14> Optimizer state 1.
<15> beta1 : float
<16> Optimizer beta1.
<17> eps : float
<18> Optimizer epsilon.
<19> weight_decay : float
<20> Weight decay.
<21> step : int
<22> Current optimizer step.
<23> lr : float
<24> The learning rate.
<25> state2 : torch.Tensor
<26> Optimizer state 2.
<27> beta2 : float
<28> Optimizer beta2.
<29> gnorm_scale : float
<30> The factor to rescale the gradient to the max clip value.
<31> unorm_vec : torch.Tensor
<32> The tensor for the update norm.
<33> max_unorm : float
<34> The maximum update norm relative to the weight norm.
<35> skip_zeros : bool
<36> Whether to skip zero-valued gradients or not (default: False).
<37> """
<38>
<39> param_norm = 0.0
<40> if max_unorm > 0.0:
<41> param_norm = torch.norm(p.data.float())
<42>
<43> if optimizer_name not in str2optimizer32bit:
<44> raise NotImplementedError(
<45> f</s>
|
===========below chunk 0===========
<s>,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
# offset: 1
)
prev_device = pre_call(g.device)
is_on_gpu([g, p, state1, state2, unorm_vec])
if g.dtype == torch.float32 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][0](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
elif g.dtype == torch.float16 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][1](
get_ptr(g),
get_ptr(p),
get_ptr(state1),
get_ptr(state2),
get_ptr(unorm_vec),
ct.c_float(max_unorm),
ct.c_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
</s>
===========below chunk 1===========
<s>,
p: Tensor,
state1: Tensor,
beta1: float,
eps: float,
step: int,
lr: float,
state2: Tensor = None,
beta2: float = 0.0,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
unorm_vec: Tensor = None,
max_unorm: float = 0.0,
skip_zeros=False,
) -> None:
# offset: 2
<s>_float(param_norm),
ct.c_float(beta1),
ct.c_float(beta2),
ct.c_float(eps),
ct.c_float(weight_decay),
ct.c_int32(step),
ct.c_float(lr),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
post_call(prev_device)
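As the docstring says, this call fuses a full optimizer update over 32-bit state into one kernel launch. For orientation, here is a hedged plain-PyTorch version of the two-state (Adam-style) math it corresponds to; details such as where eps, weight decay, gnorm_scale, max_unorm and skip_zeros enter differ in the actual kernels.

import torch

def adam_update_reference(p, g, m, v, beta1, beta2, eps, step, lr, weight_decay=0.0):
    # m, v play the role of state1/state2 above and are updated in place
    m.mul_(beta1).add_(g, alpha=1 - beta1)
    v.mul_(beta2).addcmul_(g, g, value=1 - beta2)
    m_hat = m / (1 - beta1 ** step)
    v_hat = v / (1 - beta2 ** step)
    if weight_decay > 0:
        p.mul_(1 - lr * weight_decay)                 # decoupled decay, AdamW style
    p.addcdiv_(m_hat, v_hat.sqrt().add_(eps), value=-lr)

p, g = torch.randn(10), torch.randn(10)
m, v = torch.zeros(10), torch.zeros(10)
adam_update_reference(p, g, m, v, beta1=0.9, beta2=0.999, eps=1e-8, step=1, lr=1e-3)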
===========changed ref 0===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def prefetch_tensor(A, to_cpu=False):
+ assert A.is_paged, 'Only paged tensors can be prefetched!'
+ if to_cpu:
+ deviceid = -1
+ else:
+ deviceid = A.page_deviceid
+
+ num_bytes = dtype2bytes[A.dtype]*A.numel()
+ lib.cprefetch(get_ptr(A), ct.c_size_t(num_bytes), ct.c_int32(deviceid))
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 8===========
# module: bitsandbytes.functional
def is_on_gpu(tensors):
on_gpu = True
+ gpu_ids = set()
for t in tensors:
if t is None: continue # NULL pointers are fine
+ is_paged = getattr(t, 'is_paged', False)
+ on_gpu &= (t.device.type == 'cuda' or is_paged)
- on_gpu &= t.device.type == 'cuda'
+ if not is_paged:
+ gpu_ids.add(t.device.index)
+ if not on_gpu:
+ raise TypeError(f'All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:\n {[(t.shape, t.device) for t in tensors]}')
+ if len(gpu_ids) > 1:
+ raise TypeError(f'Input tensors need to be on the same GPU, but found the following tensor and device combinations:\n {[(t.shape, t.device) for t in tensors]}')
return on_gpu
===========changed ref 9===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def prefetch_all(self, to_cpu=False):
+ # assume the first added, will be hte
+ # ones that are used first, so swap them in last
+ # in the case they are evicted again
+ for t in self.paged_tensors[::-1]:
+ prefetch_tensor(t, to_cpu)
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def create_normal_map(offset=0.9677083, use_extra_value=True):
+ if use_extra_value:
+ # one more positive value, this is an asymmetric type
+ v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
+ v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
+ v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
+ v = v1 + v2 + v3
+ else:
+ v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
+ v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
+ v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
+ v = v1 + v2 + v3
+
+ values = torch.Tensor(v)
+ values = values.sort().values
+ values /= values.max()
+ assert values.numel() == 256
+ return values
+
|
bitsandbytes.functional/optimizer_update_8bit_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<add> optim_func = None
<3>:<add> optim_func = str2optimizer8bit_blockwise[optimizer_name][0]
<del> str2optimizer8bit_blockwise[optimizer_name][0](
<4>:<del> get_ptr(p),
<5>:<del> get_ptr(g),
<6>:<del> get_ptr(state1),
<7>:<del> get_ptr(state2),
<8>:<del> ct.c_float(beta1),
<9>:<del> ct.c_float(beta2),
<10>:<del> ct.c_float(eps),
<11>:<del> ct.c_int32(step),
<12>:<del> ct.c_float(lr),
<13>:<del> get_ptr(qmap1),
<14>:<del> get_ptr(qmap2),
<15>:<del> get_ptr(absmax1),
<16>:<del> get_ptr(absmax2),
<17>:<del> ct.c_float(weight_decay),
<18>:<del> ct.c_float(gnorm_scale),
<19>:<del> ct.c_bool(skip_zeros),
<20>:<del> ct.c_int32(g.numel()),
<21>:<del> )
<23>:<add> optim_func = str2optimizer8bit_blockwise[optimizer_name][1]
<del> str2optimizer8bit_blockwise[optimizer_name][1](
<24>:<del> get_ptr(p),
<25>:<del> get_ptr(g),
<26>:<del> get_ptr(state1),
<27>:<del> get_ptr(state2),
<28>:<del> ct.c_float(beta1),
<29>:<del> ct.c_float(beta2),
|
<s> g: Tensor,
p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
absmax1: Tensor,
absmax2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
skip_zeros=False,
) -> None:
<0> prev_device = pre_call(g.device)
<1> is_on_gpu([g, p, state1, state2, qmap1, qmap2, absmax1, absmax2])
<2> if g.dtype == torch.float32 and state1.dtype == torch.uint8:
<3> str2optimizer8bit_blockwise[optimizer_name][0](
<4> get_ptr(p),
<5> get_ptr(g),
<6> get_ptr(state1),
<7> get_ptr(state2),
<8> ct.c_float(beta1),
<9> ct.c_float(beta2),
<10> ct.c_float(eps),
<11> ct.c_int32(step),
<12> ct.c_float(lr),
<13> get_ptr(qmap1),
<14> get_ptr(qmap2),
<15> get_ptr(absmax1),
<16> get_ptr(absmax2),
<17> ct.c_float(weight_decay),
<18> ct.c_float(gnorm_scale),
<19> ct.c_bool(skip_zeros),
<20> ct.c_int32(g.numel()),
<21> )
<22> elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
<23> str2optimizer8bit_blockwise[optimizer_name][1](
<24> get_ptr(p),
<25> get_ptr(g),
<26> get_ptr(state1),
<27> get_ptr(state2),
<28> ct.c_float(beta1),
<29> </s>
|
===========below chunk 0===========
<s> p: Tensor,
state1: Tensor,
state2: Tensor,
beta1: float,
beta2: float,
eps: float,
step: int,
lr: float,
qmap1: Tensor,
qmap2: Tensor,
absmax1: Tensor,
absmax2: Tensor,
weight_decay: float = 0.0,
gnorm_scale: float = 1.0,
skip_zeros=False,
) -> None:
# offset: 1
ct.c_float(eps),
ct.c_int32(step),
ct.c_float(lr),
get_ptr(qmap1),
get_ptr(qmap2),
get_ptr(absmax1),
get_ptr(absmax2),
ct.c_float(weight_decay),
ct.c_float(gnorm_scale),
ct.c_bool(skip_zeros),
ct.c_int32(g.numel()),
)
else:
raise ValueError(
f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
)
post_call(prev_device)
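The 8-bit blockwise variant stores state1/state2 as uint8 tensors plus per-block scales (absmax1/absmax2) and the qmap lookup tables; the kernel dequantizes a block, applies the update, and requantizes it with a fresh absmax. A hedged sketch of that round-trip for a single running-average state (toy blocksize, numel assumed to be a multiple of it, nearest-neighbour requantization instead of the fused CUDA path):

import torch

def update_8bit_state_reference(state_q, absmax, qmap, g, beta, blocksize=64):
    # 1) dequantize: byte -> qmap value -> rescale by the owning block's absmax
    s = qmap[state_q.long()] * absmax.repeat_interleave(blocksize)[:state_q.numel()]
    # 2) apply the running-average update in full precision
    s = beta * s + (1 - beta) * g
    # 3) requantize blockwise with a fresh absmax per block
    blocks = s.view(-1, blocksize)
    new_absmax = blocks.abs().max(dim=1).values.clamp(min=1e-8)
    normed = blocks / new_absmax[:, None]
    new_q = (normed[..., None] - qmap.view(1, 1, -1)).abs().argmin(dim=-1).to(torch.uint8)
    return new_q.flatten(), new_absmax

qmap = torch.linspace(-1, 1, 256)
g = torch.randn(128)
state_q = torch.randint(0, 256, (128,), dtype=torch.uint8)
absmax = torch.ones(2)
new_q, new_absmax = update_8bit_state_reference(state_q, absmax, qmap, g, beta=0.9)
print(new_q.shape, new_absmax.shape)     # torch.Size([128]) torch.Size([2])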
===========changed ref 0===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def prefetch_tensor(A, to_cpu=False):
+ assert A.is_paged, 'Only paged tensors can be prefetched!'
+ if to_cpu:
+ deviceid = -1
+ else:
+ deviceid = A.page_deviceid
+
+ num_bytes = dtype2bytes[A.dtype]*A.numel()
+ lib.cprefetch(get_ptr(A), ct.c_size_t(num_bytes), ct.c_int32(deviceid))
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
+ """
+ Dequantizes FP4 blockwise quantized values.
+
+ Dequantizes the tensor A with maximum absolute values absmax in blocks of size blocksize.
+
+ Parameters
+ ----------
+ A : torch.Tensor
+ The input 8-bit tensor (packed 4-bit values).
+ quant_state : tuple(torch.Tensor, torch.Size, torch.dtype)
+ Tuple of absmax values, original tensor shape and original dtype.
+ absmax : torch.Tensor
+ The absmax values.
+ out : torch.Tensor
+ Dequantized output tensor.
+ blocksize : int
+ The blocksize used in quantization.
+ quant_type : str
+ The 4-bit quantization data type {fp4, nf4}
+
+
+ Returns
+ -------
+ torch.Tensor:
+ Dequantized tensor.
+ """
+ if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
+ raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
+ if quant_type not in ['fp4', 'nf4']:
+ raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
+
+ if quant_state is None:
+ assert absmax is not None and out is not None
+ shape = out.shape
+ dtype = out.dtype
+ else:
+ absmax, shape, dtype, blocksize, compressed_stats, quant_type = quant_state
+
+
+ if compressed_stats is not None:
+ offset, state2 = compressed_stats
+ absmax = dequantize_blockwise(absmax, state2)
+ </s>
|
bitsandbytes.optim.optimizer/Optimizer8bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<3>:<add> self.is_paged = is_paged
<add> self.page_mng = F.GlobalPageManager.get_instance()
|
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def __init__(self, params, defaults, optim_bits=32, is_paged=False):
- def __init__(self, params, defaults, optim_bits=32):
<0> super().__init__(params, defaults)
<1> self.initialized = False
<2> self.name2qmap = {}
<3>
<4> self.mng = GlobalOptimManager.get_instance()
<5> self.non_castable_tensor_keys = {
<6> "qmap1",
<7> "qmap2",
<8> "max1",
<9> "max2",
<10> "new_max1",
<11> "new_max2",
<12> "state1",
<13> "state2",
<14> "gnorm_vec",
<15> "absmax1",
<16> "absmax2",
<17> "unorm_vec",
<18> }
<19>
<20> if optim_bits == 8:
<21> self.fill_qmap()
<22>
|
===========changed ref 0===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 10===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 11===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 12===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 13===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 14===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 15===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 16===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 17===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 18===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 19===========
# module: bitsandbytes.optim.adam
+ class PagedAdam(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
|
bitsandbytes.optim.optimizer/Optimizer8bit.to_gpu
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<6>:<add> is_paged = getattr(v, 'is_paged', False)
<add> if not is_paged:
<add> self.state[p][k] = v.to(p.device)
<del> self.state[p][k] = v.to(p.device)
|
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def to_gpu(self):
<0> for gindex, group in enumerate(self.param_groups):
<1> for pindex, p in enumerate(group["params"]):
<2> if p in self.state:
<3> values = self.state[p]
<4> for k, v in values.items():
<5> if isinstance(v, torch.Tensor):
<6> self.state[p][k] = v.to(p.device)
<7>
|
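The reason to_gpu now skips tensors flagged is_paged is visible in the get_paged/prefetch_tensor definitions quoted in earlier refs: paged state lives in CUDA unified (managed) memory, so it is already addressable from the GPU and gets migrated by prefetching, not by .to(device). A hedged usage sketch of those helpers, assuming they behave as the refs show (requires a CUDA build of bitsandbytes):

import torch
import bitsandbytes.functional as F

buf = F.get_paged(1024, dtype=torch.float32)   # unified-memory tensor, buf.is_paged == True
F.fill(buf, 1.0)                               # elementwise kernels work on it directly
F.prefetch_tensor(buf)                         # hint: migrate pages to buf.page_deviceid
F.prefetch_tensor(buf, to_cpu=True)            # or back to host memory
print(buf.is_paged, buf.page_deviceid)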
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def __init__(self, params, defaults, optim_bits=32, is_paged=False):
- def __init__(self, params, defaults, optim_bits=32):
super().__init__(params, defaults)
self.initialized = False
self.name2qmap = {}
+ self.is_paged = is_paged
+ self.page_mng = F.GlobalPageManager.get_instance()
self.mng = GlobalOptimManager.get_instance()
self.non_castable_tensor_keys = {
"qmap1",
"qmap2",
"max1",
"max2",
"new_max1",
"new_max2",
"state1",
"state2",
"gnorm_vec",
"absmax1",
"absmax2",
"unorm_vec",
}
if optim_bits == 8:
self.fill_qmap()
===========changed ref 1===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 11===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 12===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 13===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 14===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 15===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 16===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 17===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 18===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 19===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
|
bitsandbytes.optim.optimizer/Optimizer8bit.step
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<18>:<add> #if self.is_paged: self.page_mng.prefetch_all()
<26>:<add> self.prefetch_state(p)
<27>:<add> torch.cuda.synchronize()
<add> if self.is_paged:
<add> # all paged operation are asynchronous, we need
<add> # to sync to make sure all tensors are in the right state
<add> torch.cuda.synchronize()
<add>
|
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
@torch.no_grad()
def step(self, closure=None):
<0> """Performs a single optimization step.
<1>
<2> Arguments:
<3> closure (callable, optional): A closure that reevaluates the model
<4> and returns the loss.
<5> """
<6> loss = None
<7> if closure is not None:
<8> with torch.enable_grad():
<9> loss = closure()
<10>
<11> overflows = []
<12>
<13> if not self.initialized:
<14> self.check_overrides()
<15> self.to_gpu() # needed for fairseq pure fp16 training
<16> self.initialized = True
<17>
<18> for gindex, group in enumerate(self.param_groups):
<19> for pindex, p in enumerate(group["params"]):
<20> if p.grad is None:
<21> continue
<22> state = self.state[p]
<23> if len(state) == 0:
<24> self.init_state(group, p, gindex, pindex)
<25>
<26> self.update_step(group, p, gindex, pindex)
<27>
<28> return loss
<29>
|
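Because step() now prefetches paged state and synchronizes after the parameter loop, a paged optimizer can be used as a drop-in replacement in an ordinary training loop. A hedged usage sketch with one of the paged classes from the refs above (model and hyperparameters are illustrative, and it assumes PagedAdamW8bit is re-exported under bnb.optim like the existing optimizers):

import torch
import torch.nn as nn
import bitsandbytes as bnb

model = nn.Linear(512, 512).cuda()
opt = bnb.optim.PagedAdamW8bit(model.parameters(), lr=1e-3)   # optimizer state in paged memory

for _ in range(3):
    x = torch.randn(8, 512, device='cuda')
    loss = model(x).pow(2).mean()
    loss.backward()
    opt.step()          # prefetches paged state and synchronizes internally
    opt.zero_grad()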
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def to_gpu(self):
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p in self.state:
values = self.state[p]
for k, v in values.items():
if isinstance(v, torch.Tensor):
+ is_paged = getattr(v, 'is_paged', False)
+ if not is_paged:
+ self.state[p][k] = v.to(p.device)
- self.state[p][k] = v.to(p.device)
===========changed ref 1===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def __init__(self, params, defaults, optim_bits=32, is_paged=False):
- def __init__(self, params, defaults, optim_bits=32):
super().__init__(params, defaults)
self.initialized = False
self.name2qmap = {}
+ self.is_paged = is_paged
+ self.page_mng = F.GlobalPageManager.get_instance()
self.mng = GlobalOptimManager.get_instance()
self.non_castable_tensor_keys = {
"qmap1",
"qmap2",
"max1",
"max2",
"new_max1",
"new_max2",
"state1",
"state2",
"gnorm_vec",
"absmax1",
"absmax2",
"unorm_vec",
}
if optim_bits == 8:
self.fill_qmap()
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 12===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 13===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 14===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 15===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
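GlobalPageManager follows the same singleton pattern as GlobalOptimManager; a short sketch of how it is accessed (optimizers normally do this internally rather than in user code):

import bitsandbytes.functional as F

mng = F.GlobalPageManager.get_instance()           # lazily built via __new__ + initialize()
assert mng is F.GlobalPageManager.get_instance()   # every caller sees the same instance
print(len(mng.paged_tensors))                      # optimizers append their paged state buffers here
# calling F.GlobalPageManager() directly raises RuntimeError("Call get_instance() instead")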
===========changed ref 16===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 17===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 18===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 19===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
|
bitsandbytes.optim.optimizer/Optimizer2State.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<18>:<add> super().__init__(params, defaults, optim_bits, is_paged)
<del> super().__init__(params, defaults, optim_bits)
|
<s> optimizer_name,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
+ is_paged=False
):
<0> if not 0.0 <= lr:
<1> raise ValueError(f"Invalid learning rate: {lr}")
<2> if not 0.0 <= eps:
<3> raise ValueError(f"Invalid epsilon value: {eps}")
<4> if isinstance(betas, str):
<5> # format: '(beta1, beta2)'
<6> betas = betas.replace("(", "").replace(")", "").strip().split(",")
<7> betas = [float(b) for b in betas]
<8> for i in range(len(betas)):
<9> if not 0.0 <= betas[i] < 1.0:
<10> raise ValueError(
<11> f"Invalid beta parameter at index {i}: {betas[i]}"
<12> )
<13> if not 0.0 <= weight_decay:
<14> raise ValueError(
<15> f"Invalid weight_decay value: {weight_decay}"
<16> )
<17> defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
<18> super().__init__(params, defaults, optim_bits)
<19>
<20> if args is None:
<21> args = {}
<22> args["optim_bits"] = optim_bits
<23> args["percentile_clipping"] = 100
<24> args["min_8bit_size"] = min_8bit_size
<25> args["percentile_clipping"] = percentile_clipping
<26> args["block_wise"] = block_wise
<27> args["max_unorm"] = max_unorm
<28> args["skip_zeros"] = skip_</s>
|
===========below chunk 0===========
<s>
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
+ is_paged=False
):
# offset: 1
self.args = MockArgs(args)
else:
self.args = args
self.optimizer_name = optimizer_name
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def get_state_buffer(self, p, dtype=torch.float32):
+ if not self.is_paged or p.numel() < 1e5:
+ return torch.zeros_like(p, dtype=dtype, device=p.device)
+ else:
+ # > 1 MB
+ buff = F.get_paged(*p.shape, dtype=dtype, device=p.device)
+ F.fill(buff, 0)
+ self.page_mng.paged_tensors.append(buff)
+ return buff
+
===========changed ref 1===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def to_gpu(self):
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p in self.state:
values = self.state[p]
for k, v in values.items():
if isinstance(v, torch.Tensor):
+ is_paged = getattr(v, 'is_paged', False)
+ if not is_paged:
+ self.state[p][k] = v.to(p.device)
- self.state[p][k] = v.to(p.device)
===========changed ref 2===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
overflows = []
if not self.initialized:
self.check_overrides()
self.to_gpu() # needed for fairseq pure fp16 training
self.initialized = True
+ #if self.is_paged: self.page_mng.prefetch_all()
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
self.init_state(group, p, gindex, pindex)
+ self.prefetch_state(p)
self.update_step(group, p, gindex, pindex)
+ torch.cuda.synchronize()
+ if self.is_paged:
+ # all paged operations are asynchronous, we need
+ # to sync to make sure all tensors are in the right state
+ torch.cuda.synchronize()
+
return loss
===========changed ref 3===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def __init__(self, params, defaults, optim_bits=32, is_paged=False):
- def __init__(self, params, defaults, optim_bits=32):
super().__init__(params, defaults)
self.initialized = False
self.name2qmap = {}
+ self.is_paged = is_paged
+ self.page_mng = F.GlobalPageManager.get_instance()
self.mng = GlobalOptimManager.get_instance()
self.non_castable_tensor_keys = {
"qmap1",
"qmap2",
"max1",
"max2",
"new_max1",
"new_max2",
"state1",
"state2",
"gnorm_vec",
"absmax1",
"absmax2",
"unorm_vec",
}
if optim_bits == 8:
self.fill_qmap()
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 12===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 13===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 14===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 15===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 16===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
|
bitsandbytes.optim.optimizer/Optimizer2State.init_state
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<20>:<del> state["state1"] = torch.zeros_like(
<21>:<del> p,
<22>:<del> memory_format=torch.preserve_format,
<23>:<del> dtype=torch.float32,
<24>:<del> device=p.device,
<25>:<del> )
<26>:<del> state["state2"] = torch.zeros_like(
<27>:<del> p,
<28>:<del> memory_format=torch.preserve_format,
<29>:<del> dtype=torch.float32,
<30>:<del> device=p.device,
<31>:<del> )
<32>:<add> state["state1"] = self.get_state_buffer(p, dtype=torch.float32)
<add> state["state2"] = self.get_state_buffer(p, dtype=torch.float32)
<43>:<del> state["state1"] = torch.zeros_like(
<44>:<del> p,
<45>:<del> memory_format=torch.preserve_format,
|
# module: bitsandbytes.optim.optimizer
class Optimizer2State(Optimizer8bit):
@torch.no_grad()
def init_state(self, group, p, gindex, pindex):
<0> config = self.get_config(gindex, pindex, group)
<1>
<2> if config["optim_bits"] == 32:
<3> dtype = torch.float32
<4> elif config["optim_bits"] == 8:
<5> dtype = torch.uint8
<6> else:
<7> raise NotImplementedError(
<8> f'Amount of optimizer bits not supported: {config["optim_bits"]}'
<9> )
<10>
<11> if p.numel() < config["min_8bit_size"]:
<12> dtype = torch.float32
<13>
<14> state = self.state[p]
<15> state["step"] = 0
<16>
<17> if dtype == torch.float32 or (
<18> dtype == torch.uint8 and p.numel() < 4096
<19> ):
<20> state["state1"] = torch.zeros_like(
<21> p,
<22> memory_format=torch.preserve_format,
<23> dtype=torch.float32,
<24> device=p.device,
<25> )
<26> state["state2"] = torch.zeros_like(
<27> p,
<28> memory_format=torch.preserve_format,
<29> dtype=torch.float32,
<30> device=p.device,
<31> )
<32> elif dtype == torch.uint8:
<33> if state["step"] == 0:
<34> if "dynamic" not in self.name2qmap:
<35> self.fill_qmap()
<36> self.name2qmap["dynamic"] = self.name2qmap["dynamic"].to(
<37> p.device
<38> )
<39> self.name2qmap["udynamic"] = self.name2qmap["udynamic"].to(
<40> p.device
<41> )
<42>
<43> state["state1"] = torch.zeros_like(
<44> p,
<45> memory_format=torch.preserve_format,</s>
|
===========below chunk 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer2State(Optimizer8bit):
@torch.no_grad()
def init_state(self, group, p, gindex, pindex):
# offset: 1
device=p.device,
)
state["qmap1"] = self.name2qmap["dynamic"]
state["state2"] = torch.zeros_like(
p,
memory_format=torch.preserve_format,
dtype=torch.uint8,
device=p.device,
)
state["qmap2"] = self.name2qmap["udynamic"]
if config["block_wise"]:
n = p.numel()
blocks = n // 2048
blocks += 1 if n % 2048 > 0 else 0
state["absmax1"] = torch.zeros(
(blocks,), dtype=torch.float32, device=p.device
)
state["absmax2"] = torch.zeros(
(blocks,), dtype=torch.float32, device=p.device
)
else:
state["max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["new_max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["max2"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["new_max2"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
if config["percentile_clipping"] < 100:
state["gnorm_vec"] = torch.zeros((100,), device=p.device)
if config["max_unorm"] > 0.0:
state["unorm_vec"] = torch.zeros((1,), device=p.device)
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def prefetch_state(self, p):
+ if self.is_paged:
+ state = self.state[p]
+ s1 = state['state1']
+ is_paged = getattr(s1, 'is_paged', False)
+ if is_paged:
+ F.prefetch_tensor(state['state1'])
+ if 'state2' in state:
+ F.prefetch_tensor(state['state2'])
+
===========changed ref 1===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def get_state_buffer(self, p, dtype=torch.float32):
+ if not self.is_paged or p.numel() < 1e5:
+ return torch.zeros_like(p, dtype=dtype, device=p.device)
+ else:
+ # > 1 MB
+ buff = F.get_paged(*p.shape, dtype=dtype, device=p.device)
+ F.fill(buff, 0)
+ self.page_mng.paged_tensors.append(buff)
+ return buff
+
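A standalone sketch of the paged-memory primitives that get_state_buffer builds on; the call pattern of get_paged (shape as positional arguments, dtype and device as keywords) is taken from its use above, and a CUDA device is required:

import torch
import bitsandbytes.functional as F

buf = F.get_paged(4096, 4096, dtype=torch.float32, device=torch.device("cuda", 0))
F.fill(buf, 0)             # zero-initialize, exactly as get_state_buffer does
F.prefetch_tensor(buf)     # asynchronously page the buffer onto the GPU
torch.cuda.synchronize()   # paged operations are asynchronous; sync before reading the contents
print(getattr(buf, "is_paged", False))  # paged tensors carry the is_paged flag checked in to_gpu()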
===========changed ref 2===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def to_gpu(self):
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p in self.state:
values = self.state[p]
for k, v in values.items():
if isinstance(v, torch.Tensor):
+ is_paged = getattr(v, 'is_paged', False)
+ if not is_paged:
+ self.state[p][k] = v.to(p.device)
- self.state[p][k] = v.to(p.device)
===========changed ref 3===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
overflows = []
if not self.initialized:
self.check_overrides()
self.to_gpu() # needed for fairseq pure fp16 training
self.initialized = True
+ #if self.is_paged: self.page_mng.prefetch_all()
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
self.init_state(group, p, gindex, pindex)
+ self.prefetch_state(p)
self.update_step(group, p, gindex, pindex)
+ torch.cuda.synchronize()
+ if self.is_paged:
+ # all paged operations are asynchronous, we need
+ # to sync to make sure all tensors are in the right state
+ torch.cuda.synchronize()
+
return loss
===========changed ref 4===========
<s> optimizer_name,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
+ is_paged=False
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if isinstance(betas, str):
# format: '(beta1, beta2)'
betas = betas.replace("(", "").replace(")", "").strip().split(",")
betas = [float(b) for b in betas]
for i in range(len(betas)):
if not 0.0 <= betas[i] < 1.0:
raise ValueError(
f"Invalid beta parameter at index {i}: {betas[i]}"
)
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
+ super().__init__(params, defaults, optim_bits, is_paged)
- super().__init__(params, defaults, optim_bits)
if args is None:
args = {}
args["optim_bits"] = optim_bits
args["percentile_clipping"] = 100
args["min_8bit_size"] = min_8bit_size
args["percentile_clipping"] = percentile_clipping
args["block_wise"] = block_wise
args["max_unorm"] = max_unorm
args["skip_zeros"] = skip_zeros
self.</s>
|
bitsandbytes.optim.optimizer/Optimizer1State.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<14>:<add> super().__init__(params, defaults, optim_bits, is_paged)
<del> super().__init__(params, defaults, optim_bits)
|
<s> optimizer_name,
params,
lr=1e-3,
betas=(0.9, 0.0),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
+ is_paged=False
):
<0> if not 0.0 <= lr:
<1> raise ValueError(f"Invalid learning rate: {lr}")
<2> if not 0.0 <= eps:
<3> raise ValueError(f"Invalid epsilon value: {eps}")
<4> for i in range(len(betas)):
<5> if not 0.0 <= betas[i] < 1.0:
<6> raise ValueError(
<7> f"Invalid beta parameter at index {i}: {betas[i]}"
<8> )
<9> if not 0.0 <= weight_decay:
<10> raise ValueError(
<11> f"Invalid weight_decay value: {weight_decay}"
<12> )
<13> defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
<14> super().__init__(params, defaults, optim_bits)
<15>
<16> if args is None:
<17> args = {}
<18> args["optim_bits"] = optim_bits
<19> args["percentile_clipping"] = 100
<20> args["min_8bit_size"] = min_8bit_size
<21> args["percentile_clipping"] = percentile_clipping
<22> args["block_wise"] = block_wise
<23> args["max_unorm"] = max_unorm
<24> args["skip_zeros"] = skip_zeros
<25>
<26> self.args = MockArgs(args)
<27> else:
<28> self.args = args
<29>
<30> self.optimizer_name = optimizer_name
<31>
|
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def prefetch_state(self, p):
+ if self.is_paged:
+ state = self.state[p]
+ s1 = state['state1']
+ is_paged = getattr(s1, 'is_paged', False)
+ if is_paged:
+ F.prefetch_tensor(state['state1'])
+ if 'state2' in state:
+ F.prefetch_tensor(state['state2'])
+
===========changed ref 1===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def get_state_buffer(self, p, dtype=torch.float32):
+ if not self.is_paged or p.numel() < 1e5:
+ return torch.zeros_like(p, dtype=dtype, device=p.device)
+ else:
+ # > 1 MB
+ buff = F.get_paged(*p.shape, dtype=dtype, device=p.device)
+ F.fill(buff, 0)
+ self.page_mng.paged_tensors.append(buff)
+ return buff
+
===========changed ref 2===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def to_gpu(self):
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p in self.state:
values = self.state[p]
for k, v in values.items():
if isinstance(v, torch.Tensor):
+ is_paged = getattr(v, 'is_paged', False)
+ if not is_paged:
+ self.state[p][k] = v.to(p.device)
- self.state[p][k] = v.to(p.device)
===========changed ref 3===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
overflows = []
if not self.initialized:
self.check_overrides()
self.to_gpu() # needed for fairseq pure fp16 training
self.initialized = True
+ #if self.is_paged: self.page_mng.prefetch_all()
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
self.init_state(group, p, gindex, pindex)
+ self.prefetch_state(p)
self.update_step(group, p, gindex, pindex)
+ torch.cuda.synchronize()
+ if self.is_paged:
+ # all paged operations are asynchronous, we need
+ # to sync to make sure all tensors are in the right state
+ torch.cuda.synchronize()
+
return loss
===========changed ref 4===========
<s> optimizer_name,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
+ is_paged=False
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if isinstance(betas, str):
# format: '(beta1, beta2)'
betas = betas.replace("(", "").replace(")", "").strip().split(",")
betas = [float(b) for b in betas]
for i in range(len(betas)):
if not 0.0 <= betas[i] < 1.0:
raise ValueError(
f"Invalid beta parameter at index {i}: {betas[i]}"
)
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
+ super().__init__(params, defaults, optim_bits, is_paged)
- super().__init__(params, defaults, optim_bits)
if args is None:
args = {}
args["optim_bits"] = optim_bits
args["percentile_clipping"] = 100
args["min_8bit_size"] = min_8bit_size
args["percentile_clipping"] = percentile_clipping
args["block_wise"] = block_wise
args["max_unorm"] = max_unorm
args["skip_zeros"] = skip_zeros
self.</s>
===========changed ref 5===========
<s>
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
+ is_paged=False
):
# offset: 1
<s> args["max_unorm"] = max_unorm
args["skip_zeros"] = skip_zeros
self.args = MockArgs(args)
else:
self.args = args
self.optimizer_name = optimizer_name
===========changed ref 6===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def __init__(self, params, defaults, optim_bits=32, is_paged=False):
- def __init__(self, params, defaults, optim_bits=32):
super().__init__(params, defaults)
self.initialized = False
self.name2qmap = {}
+ self.is_paged = is_paged
+ self.page_mng = F.GlobalPageManager.get_instance()
self.mng = GlobalOptimManager.get_instance()
self.non_castable_tensor_keys = {
"qmap1",
"qmap2",
"max1",
"max2",
"new_max1",
"new_max2",
"state1",
"state2",
"gnorm_vec",
"absmax1",
"absmax2",
"unorm_vec",
}
if optim_bits == 8:
self.fill_qmap()
===========changed ref 7===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+
|
bitsandbytes.optim.optimizer/Optimizer1State.init_state
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<20>:<del> state["state1"] = torch.zeros_like(
<21>:<del> p,
<22>:<del> memory_format=torch.preserve_format,
<23>:<del> dtype=torch.float32,
<24>:<del> device=p.device,
<25>:<del> )
<26>:<add> state["state1"] = self.get_state_buffer(p, dtype=torch.float32)
<34>:<del> state["state1"] = torch.zeros_like(
<35>:<del> p,
<36>:<del> memory_format=torch.preserve_format,
<37>:<del> dtype=torch.uint8,
<38>:<del> device=p.device,
<39>:<del> )
<40>:<add> state["state1"] = self.get_state_buffer(p, dtype=torch.uint8)
|
# module: bitsandbytes.optim.optimizer
class Optimizer1State(Optimizer8bit):
@torch.no_grad()
def init_state(self, group, p, gindex, pindex):
<0> config = self.get_config(gindex, pindex, group)
<1>
<2> if config["optim_bits"] == 32:
<3> dtype = torch.float32
<4> elif config["optim_bits"] == 8:
<5> dtype = torch.uint8
<6> else:
<7> raise NotImplementedError(
<8> f'Amount of optimizer bits not supported: {config["optim_bits"]}'
<9> )
<10>
<11> if p.numel() < config["min_8bit_size"]:
<12> dtype = torch.float32
<13>
<14> state = self.state[p]
<15> state["step"] = 0
<16>
<17> if dtype == torch.float32 or (
<18> dtype == torch.uint8 and p.numel() < 4096
<19> ):
<20> state["state1"] = torch.zeros_like(
<21> p,
<22> memory_format=torch.preserve_format,
<23> dtype=torch.float32,
<24> device=p.device,
<25> )
<26> elif dtype == torch.uint8:
<27> if state["step"] == 0:
<28> if "dynamic" not in self.name2qmap:
<29> self.fill_qmap()
<30> self.name2qmap["dynamic"] = self.name2qmap["dynamic"].to(
<31> p.device
<32> )
<33>
<34> state["state1"] = torch.zeros_like(
<35> p,
<36> memory_format=torch.preserve_format,
<37> dtype=torch.uint8,
<38> device=p.device,
<39> )
<40> state["qmap1"] = self.name2qmap["dynamic"]
<41>
<42> if config["block_wise"]:
<43> n = p.numel()
<44> blocks = n // 2048
<45> blocks += 1 if n % 2048 > 0 else 0
<46>
<47> state["</s>
|
===========below chunk 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer1State(Optimizer8bit):
@torch.no_grad()
def init_state(self, group, p, gindex, pindex):
# offset: 1
(blocks,), dtype=torch.float32, device=p.device
)
else:
state["max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
state["new_max1"] = torch.zeros(
(1,), dtype=torch.float32, device=p.device
)
if config["percentile_clipping"] < 100:
state["gnorm_vec"] = torch.zeros((100,), device=p.device)
if config["max_unorm"] > 0.0:
state["unorm_vec"] = torch.zeros((1,), device=p.device)
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def prefetch_state(self, p):
+ if self.is_paged:
+ state = self.state[p]
+ s1 = state['state1']
+ is_paged = getattr(s1, 'is_paged', False)
+ if is_paged:
+ F.prefetch_tensor(state['state1'])
+ if 'state2' in state:
+ F.prefetch_tensor(state['state2'])
+
===========changed ref 1===========
<s> optimizer_name,
params,
lr=1e-3,
betas=(0.9, 0.0),
eps=1e-8,
weight_decay=0.0,
optim_bits=32,
args=None,
min_8bit_size=4096,
percentile_clipping=100,
block_wise=True,
max_unorm=0.0,
skip_zeros=False,
+ is_paged=False
):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
for i in range(len(betas)):
if not 0.0 <= betas[i] < 1.0:
raise ValueError(
f"Invalid beta parameter at index {i}: {betas[i]}"
)
if not 0.0 <= weight_decay:
raise ValueError(
f"Invalid weight_decay value: {weight_decay}"
)
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
+ super().__init__(params, defaults, optim_bits, is_paged)
- super().__init__(params, defaults, optim_bits)
if args is None:
args = {}
args["optim_bits"] = optim_bits
args["percentile_clipping"] = 100
args["min_8bit_size"] = min_8bit_size
args["percentile_clipping"] = percentile_clipping
args["block_wise"] = block_wise
args["max_unorm"] = max_unorm
args["skip_zeros"] = skip_zeros
self.args = MockArgs(args)
else:
self.args = args
self.optimizer_name = optimizer_name
===========changed ref 2===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def get_state_buffer(self, p, dtype=torch.float32):
+ if not self.is_paged or p.numel() < 1e5:
+ return torch.zeros_like(p, dtype=dtype, device=p.device)
+ else:
+ # > 1 MB
+ buff = F.get_paged(*p.shape, dtype=dtype, device=p.device)
+ F.fill(buff, 0)
+ self.page_mng.paged_tensors.append(buff)
+ return buff
+
===========changed ref 3===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def to_gpu(self):
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p in self.state:
values = self.state[p]
for k, v in values.items():
if isinstance(v, torch.Tensor):
+ is_paged = getattr(v, 'is_paged', False)
+ if not is_paged:
+ self.state[p][k] = v.to(p.device)
- self.state[p][k] = v.to(p.device)
===========changed ref 4===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
overflows = []
if not self.initialized:
self.check_overrides()
self.to_gpu() # needed for fairseq pure fp16 training
self.initialized = True
+ #if self.is_paged: self.page_mng.prefetch_all()
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group["params"]):
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
self.init_state(group, p, gindex, pindex)
+ self.prefetch_state(p)
self.update_step(group, p, gindex, pindex)
+ torch.cuda.synchronize()
+ if self.is_paged:
+ # all paged operations are asynchronous, we need
+ # to sync to make sure all tensors are in the right state
+ torch.cuda.synchronize()
+
return loss
|
tests.test_autograd/test_matmullt
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<s>.mark.parametrize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
<0> if not torch.cuda.is_available(): pytest.skip('No GPU found.')
<1> dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
<2> dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
<3> outlier_dim = torch.randint(0, dimA[1], size=(dimA[1] // 8,), device="cuda")
<4> if has_bias == False:
<5> req_grad = list(req_grad)
<6> req_grad[2] = False
<7>
<8> for i in range(k):
<9>
<10> # normal multiply
<11> if funcs[0] in [torch.mm, torch.matmul]:
<12> A = torch.randn(
<13> size=dimA, device="cuda", requires_grad=req_grad[0], dtype=dtype
<14> )
<15> if decomp == 6.0:
<16> with torch.no_grad():
<17> A[:, outlier_dim] = 6.0
<18> B = torch.randn(
<19> size=dimB, device="cuda", requires_grad=req_grad[1], dtype=dtype
<20> )
<21> target = torch.randn(
<22> size=(dim2, dim4),
<23> device="cuda",
<24> requires_grad=req_grad[1],
<25> dtype=dtype,
<26> )
<27> bias = None
<28> bias2 = None
<29> if has_bias:
<30> bias = torch</s>
|
===========below chunk 0===========
<s>ize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
# offset: 1
bias2 = bias.clone()
torch.nn.init.xavier_uniform_(B)
B2 = B.clone()
state = bnb.MatmulLtState()
state.threshold = decomp
state.has_fp16_weights = has_fp16_weights
if not has_fp16_weights:
if not transpose[0] and not transpose[1]:
B2 = B2.t().contiguous()
(
state.CB,
CBt,
state.SCB,
SCBt,
coo_tensorB,
) = bnb.functional.double_quant(B2.to(torch.float16))
B2 = state.CB
if not transpose[0] and transpose[1]:
out_torch = funcs[0](A, B.t())
out_bnb = funcs[1](A, B2, state=state, bias=bias2)
elif not transpose[0] and not transpose[1]:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B2.t(), state=state, bias=bias2)
if has_bias:
out_torch += bias
assert out_bnb.dtype == A.dtype, f"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}"
n = out_bnb.numel()
err = torch.abs(out_bnb - out_torch).mean().item()
# print</s>
===========below chunk 1===========
<s>ize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
# offset: 2
<s>.numel()
err = torch.abs(out_bnb - out_torch).mean().item()
# print(f'abs error {err:.4f}')
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() <= n * (0.0175 if dtype == torch.float16 else 0.021)
idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)
assert (idx == 0).sum().item() <= n * 0.001
if has_fp16_weights:
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(
out_bnb, target
).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias1 = bias.grad
bias.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
grad</s>
===========below chunk 2===========
<s>ize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
# offset: 3
<s> = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias2 = bias.grad
bias.grad = None
if req_grad[0]:
torch.testing.assert_allclose(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
if dim2 > 0:
assert torch.abs(gradB1).sum() > 0.0
assert torch.abs(gradB2).sum() > 0.0
else:
assert torch.abs(gradB1).sum() == 0.0
assert torch.abs(gradB2).sum() == 0.0
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.02
torch.testing.assert_allclose(
gradB1, gradB2, atol=0.18, rtol=0.3
)
if req_grad[2]:
torch.testing.assert_
===========changed ref 0===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
|
|
tests.test_autograd/test_matmul_fp8
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<11>:<add>
|
# module: tests.test_autograd
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize( "dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose", values, ids=names)
def test_matmul_fp8( dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):
<0> dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
<1> dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
<2> req_grad = list(req_grad)
<3> req_grad[2] = False
<4>
<5> for i in range(k):
<6> # normal multiply
<7> if funcs[0] in [torch.mm, torch.matmul]:
<8> A = torch.randn(size=dimA, device="cuda", requires_grad=req_grad[0], dtype=dtype)
<9> B = torch.randn(size=dimB, device="cuda", requires_grad=req_grad[1], dtype=dtype)
<10> target = torch.randn(size=(dim2, dim4), device="cuda", requires_grad=req_grad[1], dtype=dtype)
<11> torch.nn.init.xavier_uniform_(B)
<12>
<13> fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(A.device)
<14> bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(A.device)
<15>
<16> if not transpose[0] and transpose[1]:
<17> out_torch = funcs[0](A, B.t())
<18> out_bnb = funcs[1](A, B.t(), fw_code, bw_code)
<19> elif not transpose[0] and not transpose[1]:
<20> out_torch = funcs[0](A, B)
</s>
|
===========below chunk 0===========
# module: tests.test_autograd
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize( "dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose", values, ids=names)
def test_matmul_fp8( dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):
# offset: 1
assert out_bnb.dtype == A.dtype, f"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}"
n = out_bnb.numel()
err = torch.abs(out_bnb - out_torch).float().mean().item()
if n > 0:
assert err < 0.20
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
loss_torch = torch.nn.functional.mse_loss( out_torch, target ).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if req_grad[0]:
torch.testing.assert_allclose( gradA1, gradA2, atol=0.015, rtol=0.1)
if req_grad[1]:
n = gradB1.numel()
if dim2 > 0:
assert torch.abs(gradB1).sum() > 0.0
assert torch.abs(gradB2).sum() > 0.0
else:
assert torch.abs(gradB1).sum() == 0.0</s>
===========below chunk 1===========
# module: tests.test_autograd
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize( "dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose", values, ids=names)
def test_matmul_fp8( dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):
# offset: 2
<s>B2).sum() > 0.0
else:
assert torch.abs(gradB1).sum() == 0.0
assert torch.abs(gradB2).sum() == 0.0
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.02
grad_err = (gradB1-gradB2).abs().mean()
assert grad_err.item() < 0.003
torch.testing.assert_allclose(
gradB1, gradB2, atol=0.18, rtol=0.3
)
===========changed ref 0===========
# module: tests.test_autograd
n = 1
k = 3
dim1 = torch.randint(16, 64, size=(n,)).tolist()
dim2 = torch.randint(32, 96, size=(n,)).tolist()
dim3 = torch.randint(32, 96, size=(n,)).tolist()
dim4 = torch.randint(32, 96, size=(n,)).tolist()
dim2.append(0)
+
+ funcs = [(torch.matmul, bnb.matmul_4bit)]
+ str_funcs = ["matmul"]
+ req_grad = list(product([True, False], repeat=3))
+ req_grad_str = []
+ for c in req_grad:
+ strval = ''
+ for v in c:
+ if v == True: strval += 'T'
+ else: strval += 'F'
+ req_grad_str.append(strval)
+
+ transpose = [(False, True), (False, False)]
+ str_transpose = ["NT", "NN"]
+ dtype = [torch.float16, torch.float32]
+ compress_statistics = [False, True]
+ has_fp16_weights = [True, False]
+ has_bias = [True, False]
+ quant_type = ['fp4', 'nf4']
+ values = list(product(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type))
+ str_values = list(product(dim1, dim2, dim3, dim4, str_funcs, dtype, req_grad_str, str_transpose, has_bias, compress_statistics, quant_type))
+ names = ["dim1_{}_dim2_{}_dim3_{}_dim4_{}_func_{}_dtype_{}_requires_grad_{}_transpose_{}_has_bias_{}_compress_statistics_{}_quant_type_{}".format(*vals) for vals in str_values]
funcs = [(torch.matmul, bnb.research.matmul_fp</s>
===========changed ref 1===========
# module: tests.test_autograd
# offset: 1
<s>) for vals in str_values]
funcs = [(torch.matmul, bnb.research.matmul_fp8_mixed), (torch.matmul, bnb.research.matmul_fp8_global)]
str_funcs = ["matmul_fp8_mixed", 'matmul_fp8_global']
req_grad = list(product([True, False], repeat=3))
req_grad_str = []
for c in req_grad:
strval = ''
for v in c:
if v == True: strval += 'T'
else: strval += 'F'
req_grad_str.append(strval)
transpose = [(False, True), (False, False)]
str_transpose = ["NT", "NN"]
dtype = [torch.float16, torch.float32]
has_fp16_weights = [True, False]
values = list(product(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose))
str_values = list(product(dim1, dim2, dim3, dim4, str_funcs, dtype, req_grad_str, str_transpose))
names = ["dim1_{}_dim2_{}_dim3_{}_dim4_{}_func_{}_dtype_{}_requires_grad_{}_transpose_{}".format(*vals) for vals in str_values]
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
|
tests.test_functional/assert_all_approx_close
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<3>:<add> if throw:
<add> print(f"Too many values not close: assert {sumval} < {count}")
<del> print(f"Too many values not close: assert {sumval} < {count}")
<4>:<add> torch.testing.assert_close(a, b, rtol, atol)
<del> torch.testing.assert_allclose(a, b, rtol, atol)
|
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
<0> idx = torch.isclose(a, b, rtol, atol)
<1> sumval = (idx == 0).sum().item()
<2> if sumval > count:
<3> print(f"Too many values not close: assert {sumval} < {count}")
<4> torch.testing.assert_allclose(a, b, rtol, atol)
<5>
|
===========changed ref 0===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 10===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 11===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 12===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 13===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 14===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 15===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 16===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 17===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 18===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 19===========
# module: bitsandbytes.optim.adam
+ class PagedAdam(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
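Putting the pieces together, a hedged end-to-end sketch of a paged optimizer; it assumes the Paged* classes are re-exported from bitsandbytes.optim like their non-paged counterparts:

import torch
import bitsandbytes as bnb

model = torch.nn.Linear(2048, 2048).cuda()
opt = bnb.optim.PagedAdamW8bit(model.parameters(), lr=1e-4)  # 8-bit Adam states in paged memory
loss = model(torch.randn(16, 2048, device="cuda")).mean()
loss.backward()
opt.step()        # step() prefetches paged states and synchronizes when is_paged is set
opt.zero_grad()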
|
tests.test_functional/test_estimate_quantiles
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<5>:<add> torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)
<del> torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
|
# module: tests.test_functional
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
<0> A = torch.rand(1024, 1024, device="cuda")
<1> A = A.to(dtype)
<2> code = F.estimate_quantiles(A)
<3>
<4> percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
<5> torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
<6>
<7> A = torch.randn(1024, 1024, device="cuda")
<8> A = A.to(dtype)
<9> code = F.estimate_quantiles(A)
<10>
<11> quantiles = torch.quantile(A.float(), percs)
<12> diff = torch.abs(code - quantiles)
<13> assert (diff > 5e-02).sum().item() == 0
<14>
|
===========changed ref 0===========
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
+ if throw:
+ print(f"Too many values not close: assert {sumval} < {count}")
- print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 1===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 11===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 12===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 13===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 14===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 15===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 16===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 17===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 18===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 19===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
|
tests.test_functional/test_quantile_quantization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<13>:<add> torch.testing.assert_close(A1, A2, atol=5e-3, rtol=0)
<del> torch.testing.assert_allclose(A1, A2, atol=5e-3, rtol=0)
|
# module: tests.test_functional
def test_quantile_quantization():
<0> for i in range(100):
<1> A1 = torch.randn(1024, 1024, device="cuda")
<2> code = F.estimate_quantiles(A1)
<3> C = F.quantize_no_absmax(A1, code)
<4> A2 = F.dequantize_no_absmax(C, code)
<5> diff = torch.abs(A1 - A2).mean().item()
<6> assert diff < 0.0075
<7>
<8> A1 = torch.rand(1024, 1024, device="cuda")
<9> code = F.estimate_quantiles(A1)
<10> C = F.quantize_no_absmax(A1, code)
<11> A2 = F.dequantize_no_absmax(C, code)
<12> diff = torch.abs(A1 - A2).mean().item()
<13> torch.testing.assert_allclose(A1, A2, atol=5e-3, rtol=0)
<14> assert diff < 0.001
<15>
|
===========changed ref 0===========
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
+ if throw:
+ print(f"Too many values not close: assert {sumval} < {count}")
- print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
+ torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)
- torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 12===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 13===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 14===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 15===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 16===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 17===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 18===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
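Aside (illustration only, not part of the diff): the quantile-quantization round trip that test_quantile_quantization measures, written as a standalone snippet using the same bitsandbytes.functional calls already shown in the test.
import torch
import bitsandbytes.functional as F

A = torch.randn(1024, 1024, device="cuda")
code = F.estimate_quantiles(A)        # 256-entry quantile codebook estimated from A
C = F.quantize_no_absmax(A, code)     # uint8 codes, no per-tensor absmax scaling
A2 = F.dequantize_no_absmax(C, code)
print((A - A2).abs().mean().item())   # the test bounds this at 0.0075 for N(0, 1) inputs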
|
tests.test_functional/test_dynamic_quantization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<19>:<add> torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)
<del> torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
|
# module: tests.test_functional
def test_dynamic_quantization():
<0> diffs = []
<1> reldiffs = []
<2> for i in range(100):
<3> A1 = torch.randn(1024, 1024, device="cuda")
<4> C, S = F.quantize(A1)
<5> A2 = F.dequantize(C, S)
<6> diff = torch.abs(A1 - A2)
<7> reldiff = diff / torch.abs(A1 + 1e-8)
<8> diffs.append(diff.mean().item())
<9> reldiffs.append(reldiff.mean().item())
<10> assert diff.mean().item() < 0.0135
<11> # print(sum(diffs)/len(diffs))
<12> # print(sum(reldiffs)/len(reldiffs))
<13>
<14> for i in range(100):
<15> A1 = torch.rand(1024, 1024, device="cuda")
<16> C, S = F.quantize(A1)
<17> A2 = F.dequantize(C, S)
<18> diff = torch.abs(A1 - A2).mean().item()
<19> torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
<20> assert diff < 0.004
<21>
|
===========changed ref 0===========
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
+ if throw:
+ print(f"Too many values not close: assert {sumval} < {count}")
- print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
+ torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)
- torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
===========changed ref 2===========
# module: tests.test_functional
def test_quantile_quantization():
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
assert diff < 0.0075
A1 = torch.rand(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
+ torch.testing.assert_close(A1, A2, atol=5e-3, rtol=0)
- torch.testing.assert_allclose(A1, A2, atol=5e-3, rtol=0)
assert diff < 0.001
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 12===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 13===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 14===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 15===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 16===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 17===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 18===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
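Aside (illustration only): the recurring assert_allclose -> assert_close swaps in this commit track the deprecation of torch.testing.assert_allclose in newer PyTorch releases; the replacement takes the same atol/rtol pair, e.g. with illustrative values:
import torch

a = torch.tensor([1.0000, 2.0000])
b = torch.tensor([1.0005, 2.0005])
# was: torch.testing.assert_allclose(a, b, atol=1e-2, rtol=0)
torch.testing.assert_close(a, b, atol=1e-2, rtol=0)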
|
tests.test_functional/test_dynamic_blockwise_quantization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<1>:<del> for blocksize in [4096, 2048, 1024, 512]:
<2>:<add> diffs = []
<del> diffs = []
<3>:<add> reldiffs = []
<del> reldiffs = []
<4>:<add> for i in range(100):
<del> for i in range(100):
<5>:<add> A1 = torch.randn(1024, 1024, device="cuda")
<del> A1 = torch.randn(1024, 1024, device="cuda")
<6>:<add> C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)
<del> C, S = F.quantize_blockwise(A1, blocksize=blocksize)
<7>:<add> A2 = F.dequantize_blockwise(C, S)
<del> A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)
<8>:<add> diff = torch.abs(A1 - A2)
<del> diff = torch.abs(A1 - A2)
<9>:<add> reldiff = diff / torch.abs(A1 + 1e-8)
<del> reldiff = diff / torch.abs(A1 + 1e-8)
<10>:<add> diffs.append(diff.mean().item())
<del> diffs.append(diff.mean().item())
<11>:<add> reldiffs.append(reldiff.mean().item())
<del> reldiffs.append(reldiff.mean().item())
<12>:<add> abserr = sum(diffs)/len(diffs)
<del> abserr = sum(diffs)/len(diffs)
<13>:<add> relerr = sum(reldiffs)/len(reldiffs)
<del> relerr = sum(reldiffs)/len(reldiffs)
<14>:<add> assert abserr < 0.011
<del> assert abserr < 0.011
<15>:<add> assert relerr < 0.018
<del> assert relerr < 0.018
<16>:<add> #print('nested=
|
# module: tests.test_functional
+ @pytest.mark.parametrize("nested", [False, True], ids=["False", "True"])
+ @pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
+ def test_dynamic_blockwise_quantization(nested, blocksize):
- def test_dynamic_blockwise_quantization():
<0> #print('')
<1> for blocksize in [4096, 2048, 1024, 512]:
<2> diffs = []
<3> reldiffs = []
<4> for i in range(100):
<5> A1 = torch.randn(1024, 1024, device="cuda")
<6> C, S = F.quantize_blockwise(A1, blocksize=blocksize)
<7> A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)
<8> diff = torch.abs(A1 - A2)
<9> reldiff = diff / torch.abs(A1 + 1e-8)
<10> diffs.append(diff.mean().item())
<11> reldiffs.append(reldiff.mean().item())
<12> abserr = sum(diffs)/len(diffs)
<13> relerr = sum(reldiffs)/len(reldiffs)
<14> assert abserr < 0.011
<15> assert relerr < 0.018
<16> #print('randn', blocksize, sum(diffs)/len(diffs))
<17> #print('randn', blocksize, sum(reldiffs)/len(reldiffs))
<18>
<19> diffs = []
<20> for i in range(100):
<21> A1 = torch.rand(1024, 1024, device="cuda")
<22> C, S = F.quantize_blockwise(A1, blocksize=blocksize)
<23> A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)
<24> diff = torch.abs(A1 - A2)
<25> reldiff = diff / torch.abs(A1 + 1e-8)
<26> </s>
|
===========below chunk 0===========
# module: tests.test_functional
+ @pytest.mark.parametrize("nested", [False, True], ids=["False", "True"])
+ @pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
+ def test_dynamic_blockwise_quantization(nested, blocksize):
- def test_dynamic_blockwise_quantization():
# offset: 1
reldiffs.append(reldiff.mean().item())
#torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
abserr = sum(diffs)/len(diffs)
relerr = sum(reldiffs)/len(reldiffs)
assert abserr < 0.0035
assert relerr < 0.015
===========changed ref 0===========
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
+ if throw:
+ print(f"Too many values not close: assert {sumval} < {count}")
- print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
+ torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)
- torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
===========changed ref 2===========
# module: tests.test_functional
def test_quantile_quantization():
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
assert diff < 0.0075
A1 = torch.rand(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
+ torch.testing.assert_close(A1, A2, atol=5e-3, rtol=0)
- torch.testing.assert_allclose(A1, A2, atol=5e-3, rtol=0)
assert diff < 0.001
===========changed ref 3===========
# module: tests.test_functional
def test_dynamic_quantization():
diffs = []
reldiffs = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, S = F.quantize(A1)
A2 = F.dequantize(C, S)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
assert diff.mean().item() < 0.0135
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, S = F.quantize(A1)
A2 = F.dequantize(C, S)
diff = torch.abs(A1 - A2).mean().item()
+ torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)
- torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
assert diff < 0.004
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 12===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 13===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
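Aside (illustration only): the nested=True branch that the parametrized test above exercises quantizes the per-block absmax statistics a second time to shrink their overhead; a minimal round-trip sketch with the same calls the test uses.
import torch
import bitsandbytes.functional as F

A = torch.randn(1024, 1024, device="cuda")
C, state = F.quantize_blockwise(A, blocksize=64, nested=True)
A2 = F.dequantize_blockwise(C, state)      # the quant state carries the nested absmax info
print((A - A2).abs().mean().item())        # the test allows roughly 0.011 mean absolute error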
|
tests.test_functional/test_percentile_clipping
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<22>:<add> torch.testing.assert_close(gnorm_vec1, torch.sqrt(gnorm_vec2))
<del> torch.testing.assert_allclose(gnorm_vec1, torch.sqrt(gnorm_vec2))
<23>:<add> torch.testing.assert_close(clip1, clip2)
<del> torch.testing.assert_allclose(clip1, clip2)
<24>:<add> torch.testing.assert_close(gnorm1, gnorm2)
<del> torch.testing.assert_allclose(gnorm1, gnorm2)
|
# module: tests.test_functional
+ #print('nested=', nested, 'rand', blocksize, sum(diffs)/len(diffs))
+ #print('nested=', nested, 'rand', blocksize, sum(reldiffs)/len(reldiffs))
+
+
+
@pytest.mark.parametrize(
"gtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_percentile_clipping(gtype):
<0> gnorm_vec1 = torch.zeros(100, device="cuda")
<1> gnorm_vec2 = torch.zeros(100, device="cuda")
<2> n = 4
<3> step = 0
<4> percentile = 5
<5> for i in range(k):
<6> step += 1
<7> g = torch.randn(n, n, dtype=gtype, device="cuda")
<8> gnorm1, clip2, gnorm_scale = F.percentile_clipping(
<9> g, gnorm_vec2, step, percentile=percentile
<10> )
<11> assert gnorm_scale == 1.0 if gnorm1 < clip2 else clip2 / gnorm1
<12>
<13> gnorm2 = torch.norm(g.float())
<14> if step == 1:
<15> gnorm_vec1[:] = gnorm2
<16> else:
<17> gnorm_vec1[step % 100] = gnorm2
<18>
<19> vals, idx = torch.sort(gnorm_vec1)
<20> clip1 = vals[percentile]
<21>
<22> torch.testing.assert_allclose(gnorm_vec1, torch.sqrt(gnorm_vec2))
<23> torch.testing.assert_allclose(clip1, clip2)
<24> torch.testing.assert_allclose(gnorm1, gnorm2)
<25>
|
===========changed ref 0===========
<s>
- #print('rand', blocksize, sum(diffs)/len(diffs))
- #print('rand', blocksize, sum(reldiffs)/len(reldiffs))
-
-
-
- @pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
- @pytest.mark.skip("Stochastic has some bugs, but will be deprecated soon anyways.")
- def test_dynamic_blockwise_stochastic_quantization(blocksize):
- diffs = []
- reldiffs = []
- rand = torch.rand(1024).cuda()
- err = 0
- for i in range(100):
- A1 = torch.randn(1024, 1024, device="cuda")
- C1, S1 = F.quantize_blockwise(A1, rand=rand, blocksize=blocksize)
- C2, S2 = F.quantize_blockwise(A1, blocksize=blocksize)
- A2 = F.dequantize_blockwise(C1, S1, blocksize=blocksize)
- err += (A1-A2).abs().mean().item()/100
- # a maximunm distance of quantized values of 1
- torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
- fraction_smaller = (C1 < C2).float().sum() / C1.numel()
- fraction_larger = (C1 > C2).float().sum() / C1.numel()
- torch.testing.assert_allclose(fraction_larger, fraction_smaller, atol=0.01, rtol=0)
- assert err < 0.019
-
===========changed ref 1===========
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
+ if throw:
+ print(f"Too many values not close: assert {sumval} < {count}")
- print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
+ torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)
- torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
===========changed ref 3===========
# module: tests.test_functional
def test_quantile_quantization():
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
assert diff < 0.0075
A1 = torch.rand(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
+ torch.testing.assert_close(A1, A2, atol=5e-3, rtol=0)
- torch.testing.assert_allclose(A1, A2, atol=5e-3, rtol=0)
assert diff < 0.001
===========changed ref 4===========
# module: tests.test_functional
def test_dynamic_quantization():
diffs = []
reldiffs = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, S = F.quantize(A1)
A2 = F.dequantize(C, S)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
assert diff.mean().item() < 0.0135
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, S = F.quantize(A1)
A2 = F.dequantize(C, S)
diff = torch.abs(A1 - A2).mean().item()
+ torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)
- torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0)
assert diff < 0.004
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 12===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
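Aside (illustration only): what the gnorm_scale returned by F.percentile_clipping means, re-derived in plain PyTorch from the assertions in the test above (window of past gradient norms, 5th-percentile clip value, rescale rather than truncate). The history tensor here is synthetic.
import torch

gnorm_history = torch.rand(100) * 10          # sliding window of previous gradient norms
g = torch.randn(4, 4)
gnorm = torch.norm(g)
clip = torch.sort(gnorm_history).values[5]    # percentile=5, as in the test
gnorm_scale = 1.0 if gnorm < clip else (clip / gnorm).item()
g = g * gnorm_scale                           # gradient rescaled when its norm exceeds the clip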
|
tests.test_functional/test_approx_igemm
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<4>:<add> #print("")
<del> print("")
<16>:<add> torch.testing.assert_close(
<del> torch.testing.assert_allclose(
|
# module: tests.test_functional
@pytest.mark.parametrize(
"dim1, dim2, quant_methods, batched", values, ids=names
)
def test_approx_igemm(dim1, dim2, quant_methods, batched):
<0> dim1 = dim1 - (dim1 % 32)
<1> dim2 = dim2 - (dim2 % 32)
<2> errors = []
<3> relerrors = []
<4> print("")
<5> for i in range(5):
<6> if batched:
<7> A = torch.normal(0, 0.5, size=(32, dim1, dim2 // 32), device="cuda")
<8> B = torch.normal(0, 0.5, size=(32, dim2 // 32, dim1), device="cuda")
<9> maxA, Ac = quant_methods[0](A, 2)
<10> maxB, Bc = quant_methods[1](B, 1)
<11> else:
<12> A = torch.normal(0, 0.5, size=(dim1, dim2), device="cuda")
<13> B = torch.normal(0, 0.5, size=(dim2, dim1), device="cuda")
<14> maxA, Ac = quant_methods[0](A, 1)
<15> maxB, Bc = quant_methods[1](B, 0)
<16> torch.testing.assert_allclose(
<17> quant_methods[2](maxA, Ac), A, atol=0.025, rtol=0.05
<18> )
<19> if batched:
<20> out2 = torch.bmm(A, B)
<21> C = torch.bmm(Ac.float(), Bc.float())
<22> else:
<23> out2 = torch.mm(A, B)
<24> C = F.igemm(Ac, Bc)
<25> out = quant_methods[4](maxA, maxB, C)
<26> std = out2.std()
<27> out /= std
<28> out2 /= std
<29> err = torch.abs(out - out2)
<30> </s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dim1, dim2, quant_methods, batched", values, ids=names
)
def test_approx_igemm(dim1, dim2, quant_methods, batched):
# offset: 1
errors.append(err.mean().item())
relerrors.append(relerr.mean().item())
print(mean(errors))
print(mean(relerrors))
===========changed ref 0===========
<s>
- #print('rand', blocksize, sum(diffs)/len(diffs))
- #print('rand', blocksize, sum(reldiffs)/len(reldiffs))
-
-
-
- @pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
- @pytest.mark.skip("Stochastic has some bugs, but will be deprecated soon anyways.")
- def test_dynamic_blockwise_stochastic_quantization(blocksize):
- diffs = []
- reldiffs = []
- rand = torch.rand(1024).cuda()
- err = 0
- for i in range(100):
- A1 = torch.randn(1024, 1024, device="cuda")
- C1, S1 = F.quantize_blockwise(A1, rand=rand, blocksize=blocksize)
- C2, S2 = F.quantize_blockwise(A1, blocksize=blocksize)
- A2 = F.dequantize_blockwise(C1, S1, blocksize=blocksize)
- err += (A1-A2).abs().mean().item()/100
- # a maximunm distance of quantized values of 1
- torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
- fraction_smaller = (C1 < C2).float().sum() / C1.numel()
- fraction_larger = (C1 > C2).float().sum() / C1.numel()
- torch.testing.assert_allclose(fraction_larger, fraction_smaller, atol=0.01, rtol=0)
- assert err < 0.019
-
===========changed ref 1===========
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
+ if throw:
+ print(f"Too many values not close: assert {sumval} < {count}")
- print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
+ torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)
- torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
===========changed ref 3===========
# module: tests.test_functional
+ #print('nested=', nested, 'rand', blocksize, sum(diffs)/len(diffs))
+ #print('nested=', nested, 'rand', blocksize, sum(reldiffs)/len(reldiffs))
+
+
+
@pytest.mark.parametrize(
"gtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_percentile_clipping(gtype):
gnorm_vec1 = torch.zeros(100, device="cuda")
gnorm_vec2 = torch.zeros(100, device="cuda")
n = 4
step = 0
percentile = 5
for i in range(k):
step += 1
g = torch.randn(n, n, dtype=gtype, device="cuda")
gnorm1, clip2, gnorm_scale = F.percentile_clipping(
g, gnorm_vec2, step, percentile=percentile
)
assert gnorm_scale == 1.0 if gnorm1 < clip2 else clip2 / gnorm1
gnorm2 = torch.norm(g.float())
if step == 1:
gnorm_vec1[:] = gnorm2
else:
gnorm_vec1[step % 100] = gnorm2
vals, idx = torch.sort(gnorm_vec1)
clip1 = vals[percentile]
+ torch.testing.assert_close(gnorm_vec1, torch.sqrt(gnorm_vec2))
- torch.testing.assert_allclose(gnorm_vec1, torch.sqrt(gnorm_vec2))
+ torch.testing.assert_close(clip1, clip2)
- torch.testing.assert_allclose(clip1, clip2)
+ torch.testing.assert_close(gnorm1, gnorm2)
- torch.testing.assert_allclose(gnorm1, gnorm2)
===========changed ref 4===========
# module: tests.test_functional
def test_quantile_quantization():
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
assert diff < 0.0075
A1 = torch.rand(1024, 1024, device="cuda")
code = F.estimate_quantiles(A1)
C = F.quantize_no_absmax(A1, code)
A2 = F.dequantize_no_absmax(C, code)
diff = torch.abs(A1 - A2).mean().item()
+ torch.testing.assert_close(A1, A2, atol=5e-3, rtol=0)
- torch.testing.assert_allclose(A1, A2, atol=5e-3, rtol=0)
assert diff < 0.001
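Aside (illustration only): test_approx_igemm quantizes both operands, multiplies in int8 with F.igemm, and dequantizes the int32 result; below is a sketch with explicit absmax scaling in place of the quant_methods fixtures, which are defined elsewhere in the test module.
import torch
import bitsandbytes.functional as F

A = torch.normal(0, 0.5, size=(512, 512), device="cuda")
B = torch.normal(0, 0.5, size=(512, 512), device="cuda")
sA = A.abs().max(dim=1, keepdim=True).values          # per-row scale of A
sB = B.abs().max(dim=0, keepdim=True).values          # per-column scale of B
Ac = torch.round(A / sA * 127).to(torch.int8)
Bc = torch.round(B / sB * 127).to(torch.int8)
C = F.igemm(Ac, Bc).float()                           # exact int32 accumulation
out = C * sA * sB / (127 * 127)                       # rescale back to float
print((out - A @ B).abs().mean().item())              # small approximation error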
|
tests.test_functional/test_igemm
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<29>:<add> torch.testing.assert_close(out.float(), out2)
<del> torch
|
# module: tests.test_functional
@pytest.mark.parametrize(
"hidden_dim, batch_dim, transpose, seq_dim", values, ids=names
)
def test_igemm(hidden_dim, batch_dim, transpose, seq_dim):
<0> hidden_dim = hidden_dim - (hidden_dim % 32)
<1> batch_dim = batch_dim - (batch_dim % 16)
<2> seq_dim = seq_dim - (seq_dim % 16)
<3> for i in range(k):
<4> shapeA = (
<5> (batch_dim, hidden_dim)
<6> if not transpose[0]
<7> else (hidden_dim, batch_dim)
<8> )
<9> shapeB = (
<10> (32 * random.randint(1, 4), hidden_dim)
<11> if transpose[1]
<12> else (hidden_dim, 32 * random.randint(1, 4))
<13> )
<14> A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
<15> B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
<16> if not transpose[0] and not transpose[1]:
<17> out2 = torch.matmul(A.float(), B.float())
<18> out = F.igemm(A, B)
<19> elif not transpose[0] and transpose[1]:
<20> out2 = torch.matmul(A.float(), B.t().float())
<21> out = F.igemm(A, B.t())
<22> elif transpose[0] and not transpose[1]:
<23> out2 = torch.matmul(A.t().float(), B.float())
<24> out = F.igemm(A.t(), B)
<25> elif transpose[0] and transpose[1]:
<26> out2 = torch.matmul(A.t().float(), B.t().float())
<27> out = F.igemm(A.t(), B.t())
<28>
<29> torch</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize(
"hidden_dim, batch_dim, transpose, seq_dim", values, ids=names
)
def test_igemm(hidden_dim, batch_dim, transpose, seq_dim):
# offset: 1
for i in range(k):
shapeA = (batch_dim, seq_dim, hidden_dim)
shapeB = (
(32 * random.randint(1, 4), hidden_dim)
if transpose[1]
else (hidden_dim, 32 * random.randint(1, 4))
)
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
if not transpose[0] and not transpose[1]:
out2 = torch.matmul(A.float(), B.float())
out = F.igemm(A, B)
elif not transpose[0] and transpose[1]:
out2 = torch.matmul(A.float(), B.t().float())
out = F.igemm(A, B.t())
torch.testing.assert_allclose(out.float(), out2)
===========changed ref 0===========
<s>
- #print('rand', blocksize, sum(diffs)/len(diffs))
- #print('rand', blocksize, sum(reldiffs)/len(reldiffs))
-
-
-
- @pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
- @pytest.mark.skip("Stochastic has some bugs, but will be deprecated soon anyways.")
- def test_dynamic_blockwise_stochastic_quantization(blocksize):
- diffs = []
- reldiffs = []
- rand = torch.rand(1024).cuda()
- err = 0
- for i in range(100):
- A1 = torch.randn(1024, 1024, device="cuda")
- C1, S1 = F.quantize_blockwise(A1, rand=rand, blocksize=blocksize)
- C2, S2 = F.quantize_blockwise(A1, blocksize=blocksize)
- A2 = F.dequantize_blockwise(C1, S1, blocksize=blocksize)
- err += (A1-A2).abs().mean().item()/100
- # a maximunm distance of quantized values of 1
- torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
- fraction_smaller = (C1 < C2).float().sum() / C1.numel()
- fraction_larger = (C1 > C2).float().sum() / C1.numel()
- torch.testing.assert_allclose(fraction_larger, fraction_smaller, atol=0.01, rtol=0)
- assert err < 0.019
-
===========changed ref 1===========
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
+ if throw:
+ print(f"Too many values not close: assert {sumval} < {count}")
- print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
+ torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)
- torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
===========changed ref 3===========
# module: tests.test_functional
+ #print('nested=', nested, 'rand', blocksize, sum(diffs)/len(diffs))
+ #print('nested=', nested, 'rand', blocksize, sum(reldiffs)/len(reldiffs))
+
+
+
@pytest.mark.parametrize(
"gtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_percentile_clipping(gtype):
gnorm_vec1 = torch.zeros(100, device="cuda")
gnorm_vec2 = torch.zeros(100, device="cuda")
n = 4
step = 0
percentile = 5
for i in range(k):
step += 1
g = torch.randn(n, n, dtype=gtype, device="cuda")
gnorm1, clip2, gnorm_scale = F.percentile_clipping(
g, gnorm_vec2, step, percentile=percentile
)
assert gnorm_scale == 1.0 if gnorm1 < clip2 else clip2 / gnorm1
gnorm2 = torch.norm(g.float())
if step == 1:
gnorm_vec1[:] = gnorm2
else:
gnorm_vec1[step % 100] = gnorm2
vals, idx = torch.sort(gnorm_vec1)
clip1 = vals[percentile]
+ torch.testing.assert_close(gnorm_vec1, torch.sqrt(gnorm_vec2))
- torch.testing.assert_allclose(gnorm_vec1, torch.sqrt(gnorm_vec2))
+ torch.testing.assert_close(clip1, clip2)
- torch.testing.assert_allclose(clip1, clip2)
+ torch.testing.assert_close(gnorm1, gnorm2)
- torch.testing.assert_allclose(gnorm1, gnorm2)
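Aside (illustration only): unlike the approximate path above, test_igemm compares F.igemm against a float matmul with no tolerance, which works because int8 x int8 sums of this size are exactly representable in float32; shapes below follow the multiples-of-32 convention the test uses.
import torch
import bitsandbytes.functional as F

A = torch.randint(-128, 127, (64, 128), device="cuda", dtype=torch.int8)
B = torch.randint(-128, 127, (128, 32), device="cuda", dtype=torch.int8)
out = F.igemm(A, B)                               # int32 result
ref = torch.matmul(A.float(), B.float())
torch.testing.assert_close(out.float(), ref)      # exact match expected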
|
tests.test_functional/test_dim3_igemm
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<16>:<add> torch.testing.assert_close(out.float(), out2)
<del> torch.testing.assert_allclose(out.float(), out2)
|
# module: tests.test_functional
@pytest.mark.parametrize("seq_dim, hidden_dim, batch_dim", values, ids=names)
def test_dim3_igemm(seq_dim, hidden_dim, batch_dim):
<0> seq_dim = seq_dim - (seq_dim % 32)
<1> hidden_dim = hidden_dim - (hidden_dim % 32)
<2> batch_dim = batch_dim - (batch_dim % 2)
<3> for i in range(25):
<4> A = torch.randint(
<5> -128, 127, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
<6> ).to(torch.int8)
<7> B = torch.randint(
<8> -128, 127, size=(batch_dim, seq_dim, 1024), device="cuda"
<9> ).to(torch.int8)
<10> out2 = torch.einsum("bsi, bso->io", A.float(), B.float())
<11> iout = torch.empty(
<12> A.shape[2], B.shape[2], dtype=torch.int32, device=A.device
<13> )
<14> out = F.igemm(A, B, out=iout)
<15>
<16> torch.testing.assert_allclose(out.float(), out2)
<17>
|
===========changed ref 0===========
<s>
- #print('rand', blocksize, sum(diffs)/len(diffs))
- #print('rand', blocksize, sum(reldiffs)/len(reldiffs))
-
-
-
- @pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
- @pytest.mark.skip("Stochastic has some bugs, but will be deprecated soon anyways.")
- def test_dynamic_blockwise_stochastic_quantization(blocksize):
- diffs = []
- reldiffs = []
- rand = torch.rand(1024).cuda()
- err = 0
- for i in range(100):
- A1 = torch.randn(1024, 1024, device="cuda")
- C1, S1 = F.quantize_blockwise(A1, rand=rand, blocksize=blocksize)
- C2, S2 = F.quantize_blockwise(A1, blocksize=blocksize)
- A2 = F.dequantize_blockwise(C1, S1, blocksize=blocksize)
- err += (A1-A2).abs().mean().item()/100
- # a maximunm distance of quantized values of 1
- torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
- fraction_smaller = (C1 < C2).float().sum() / C1.numel()
- fraction_larger = (C1 > C2).float().sum() / C1.numel()
- torch.testing.assert_allclose(fraction_larger, fraction_smaller, atol=0.01, rtol=0)
- assert err < 0.019
-
===========changed ref 1===========
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
+ if throw:
+ print(f"Too many values not close: assert {sumval} < {count}")
- print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
+ torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)
- torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
===========changed ref 3===========
# module: tests.test_functional
+ #print('nested=', nested, 'rand', blocksize, sum(diffs)/len(diffs))
+ #print('nested=', nested, 'rand', blocksize, sum(reldiffs)/len(reldiffs))
+
+
+
@pytest.mark.parametrize(
"gtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_percentile_clipping(gtype):
gnorm_vec1 = torch.zeros(100, device="cuda")
gnorm_vec2 = torch.zeros(100, device="cuda")
n = 4
step = 0
percentile = 5
for i in range(k):
step += 1
g = torch.randn(n, n, dtype=gtype, device="cuda")
gnorm1, clip2, gnorm_scale = F.percentile_clipping(
g, gnorm_vec2, step, percentile=percentile
)
assert gnorm_scale == 1.0 if gnorm1 < clip2 else clip2 / gnorm1
gnorm2 = torch.norm(g.float())
if step == 1:
gnorm_vec1[:] = gnorm2
else:
gnorm_vec1[step % 100] = gnorm2
vals, idx = torch.sort(gnorm_vec1)
clip1 = vals[percentile]
+ torch.testing.assert_close(gnorm_vec1, torch.sqrt(gnorm_vec2))
- torch.testing.assert_allclose(gnorm_vec1, torch.sqrt(gnorm_vec2))
+ torch.testing.assert_close(clip1, clip2)
- torch.testing.assert_allclose(clip1, clip2)
+ torch.testing.assert_close(gnorm1, gnorm2)
- torch.testing.assert_allclose(gnorm1, gnorm2)
===========changed ref 4===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dim1, dim2, quant_methods, batched", values, ids=names
)
def test_approx_igemm(dim1, dim2, quant_methods, batched):
dim1 = dim1 - (dim1 % 32)
dim2 = dim2 - (dim2 % 32)
errors = []
relerrors = []
+ #print("")
- print("")
for i in range(5):
if batched:
A = torch.normal(0, 0.5, size=(32, dim1, dim2 // 32), device="cuda")
B = torch.normal(0, 0.5, size=(32, dim2 // 32, dim1), device="cuda")
maxA, Ac = quant_methods[0](A, 2)
maxB, Bc = quant_methods[1](B, 1)
else:
A = torch.normal(0, 0.5, size=(dim1, dim2), device="cuda")
B = torch.normal(0, 0.5, size=(dim2, dim1), device="cuda")
maxA, Ac = quant_methods[0](A, 1)
maxB, Bc = quant_methods[1](B, 0)
+ torch.testing.assert_close(
- torch.testing.assert_allclose(
quant_methods[2](maxA, Ac), A, atol=0.025, rtol=0.05
)
if batched:
out2 = torch.bmm(A, B)
C = torch.bmm(Ac.float(), Bc.float())
else:
out2 = torch.mm(A, B)
C = F.igemm(Ac, Bc)
out = quant_methods[4](maxA, maxB, C)
std = out2.std()
out /= std
out2 /= std
err = torch.abs(out - out2)
relerr = err / torch.abs(</s>
|
tests.test_functional/test_ibmm
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, transpose", values, ids=names)
def test_ibmm(dim1, dim2, dim3, dim4, transpose):
<0> dim2 = dim2 - (dim2 % 16)
<1> dim3 = dim3 - (dim3 % 16)
<2> dim4 = dim4 - (dim4 % 16)
<3> for i in range(k):
<4> shapeA = (dim1, dim3, dim2) if transpose[0] else (dim1, dim2, dim3)
<5> shapeB = (dim1, dim4, dim3) if transpose[1] else (dim1, dim3, dim4)
<6> A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
<7> B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
<8>
<9> if not transpose[0] and not transpose[1]:
<10> out2 = torch.bmm(A.float(), B.float())
<11> out = F.igemm(A, B)
<12> elif not transpose[0] and transpose[1]:
<13> out2 = torch.bmm(A.float(), B.permute([0, 2, 1]).float())
<14> out = F.igemm(A, B.permute([0, 2, 1]))
<15> elif transpose[0] and not transpose[1]:
<16> out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.float())
<17> out = F.igemm(A.permute([0, 2, 1]), B)
<18> elif transpose[0] and transpose[1]:
<19> out2 = torch.bmm(
<20> A.permute([0, 2, 1]).float(), B.permute([0, 2, 1]).float()
<21> )
<22> out = F.igemm(A.permute([0, 2, 1]</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, transpose", values, ids=names)
def test_ibmm(dim1, dim2, dim3, dim4, transpose):
# offset: 1
torch.testing.assert_allclose(out.float(), out2.float())
===========changed ref 0===========
# module: tests.test_functional
@pytest.mark.parametrize("seq_dim, hidden_dim, batch_dim", values, ids=names)
def test_dim3_igemm(seq_dim, hidden_dim, batch_dim):
seq_dim = seq_dim - (seq_dim % 32)
hidden_dim = hidden_dim - (hidden_dim % 32)
batch_dim = batch_dim - (batch_dim % 2)
for i in range(25):
A = torch.randint(
-128, 127, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
).to(torch.int8)
B = torch.randint(
-128, 127, size=(batch_dim, seq_dim, 1024), device="cuda"
).to(torch.int8)
out2 = torch.einsum("bsi, bso->io", A.float(), B.float())
iout = torch.empty(
A.shape[2], B.shape[2], dtype=torch.int32, device=A.device
)
out = F.igemm(A, B, out=iout)
+ torch.testing.assert_close(out.float(), out2)
- torch.testing.assert_allclose(out.float(), out2)
===========changed ref 1===========
<s>
- #print('rand', blocksize, sum(diffs)/len(diffs))
- #print('rand', blocksize, sum(reldiffs)/len(reldiffs))
-
-
-
- @pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
- @pytest.mark.skip("Stochastic has some bugs, but will be deprecated soon anyways.")
- def test_dynamic_blockwise_stochastic_quantization(blocksize):
- diffs = []
- reldiffs = []
- rand = torch.rand(1024).cuda()
- err = 0
- for i in range(100):
- A1 = torch.randn(1024, 1024, device="cuda")
- C1, S1 = F.quantize_blockwise(A1, rand=rand, blocksize=blocksize)
- C2, S2 = F.quantize_blockwise(A1, blocksize=blocksize)
- A2 = F.dequantize_blockwise(C1, S1, blocksize=blocksize)
- err += (A1-A2).abs().mean().item()/100
- # a maximunm distance of quantized values of 1
- torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
- fraction_smaller = (C1 < C2).float().sum() / C1.numel()
- fraction_larger = (C1 > C2).float().sum() / C1.numel()
- torch.testing.assert_allclose(fraction_larger, fraction_smaller, atol=0.01, rtol=0)
- assert err < 0.019
-
===========changed ref 2===========
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
+ if throw:
+ print(f"Too many values not close: assert {sumval} < {count}")
- print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 3===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
+ torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)
- torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
===========changed ref 4===========
# module: tests.test_functional
+ #print('nested=', nested, 'rand', blocksize, sum(diffs)/len(diffs))
+ #print('nested=', nested, 'rand', blocksize, sum(reldiffs)/len(reldiffs))
+
+
+
@pytest.mark.parametrize(
"gtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_percentile_clipping(gtype):
gnorm_vec1 = torch.zeros(100, device="cuda")
gnorm_vec2 = torch.zeros(100, device="cuda")
n = 4
step = 0
percentile = 5
for i in range(k):
step += 1
g = torch.randn(n, n, dtype=gtype, device="cuda")
gnorm1, clip2, gnorm_scale = F.percentile_clipping(
g, gnorm_vec2, step, percentile=percentile
)
assert gnorm_scale == 1.0 if gnorm1 < clip2 else clip2 / gnorm1
gnorm2 = torch.norm(g.float())
if step == 1:
gnorm_vec1[:] = gnorm2
else:
gnorm_vec1[step % 100] = gnorm2
vals, idx = torch.sort(gnorm_vec1)
clip1 = vals[percentile]
+ torch.testing.assert_close(gnorm_vec1, torch.sqrt(gnorm_vec2))
- torch.testing.assert_allclose(gnorm_vec1, torch.sqrt(gnorm_vec2))
+ torch.testing.assert_close(clip1, clip2)
- torch.testing.assert_allclose(clip1, clip2)
+ torch.testing.assert_close(gnorm1, gnorm2)
- torch.testing.assert_allclose(gnorm1, gnorm2)
|
|
tests.test_functional/test_nvidia_transform
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<16>:<add> torch.testing.assert_close(A.flatten(), out.flatten())
<del> torch.testing.assert_allclose(A.flatten(), out.flatten())
<18>:<add> torch.testing.assert_close(A.t().flatten(), out.flatten())
<del> torch.testing.assert_allclose(A.t().flatten(), out.flatten())
|
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",values,ids=names)
def test_nvidia_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
<0> if dims == 3 and out_order != "col32":
<1> return
<2> if dtype == torch.int32 and out_order != "col32":
<3> return
<4> func = F.get_transform_func(dtype, orderA, orderOut, transpose)
<5>
<6> if dims == 2:
<7> A = torch.randint(-128, 127, size=(dim1, dim2), device="cuda").to(dtype)
<8> elif dims == 3:
<9> A = torch.randint(-128, 127, size=(dim1, dim2, dim3), device="cuda").to(
<10> dtype
<11> )
<12>
<13> out, S = F.nvidia_transform(A, to_order=orderOut)
<14>
<15> if orderOut == "row":
<16> torch.testing.assert_allclose(A.flatten(), out.flatten())
<17> elif orderOut == "col":
<18> torch.testing.assert_allclose(A.t().flatten(), out.flatten())
<19> elif orderOut == "col32":
<20> if dims == 2:
<21> n = A.shape[0] * (A.shape[1] + (32 - (A.shape[1] % 32)))
<22> elif dims == 3:
<23> n = (
<24> A.shape[0]
<25> * A.shape[1]
<26> * (A.shape[2] + (32 - (A.shape[2] % 32)))
<27> )
<28> assert out.numel() == n
<29> elif orderOut == "col_turing":
<30> # 32 col 8 row tiles
<31> n = (A.shape[0] + (8 - A.shape[0] % 8)) * (
<32> A.shape[1] + (32</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",values,ids=names)
def test_nvidia_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
# offset: 1
)
assert out.numel() == n
total_coltile = (A.shape[1] // 32) + (1 if A.shape[1] % 32 != 0 else 0)
for row in range(A.shape[0]):
for col in range(A.shape[1]):
i = row * A.shape[1]
j = col
coltile = (col // 32) + (1 if col % 32 != 0 else 0)
rowtile = (
(row // 8) + (1 if row % 8 != 0 else 0)
) * total_coltile
offset = 32 * 8 * (rowtile + coltile)
col2 = col % 32
row2 = (row % 8) * 32
assert A.flatten()[i + j] == A[row, col]
# assert A.flatten()[i+j] == out.flatten()[row2+col2]
# torch.testing.assert_allclose(A.flatten()[i+j], A[row, col])
# torch.testing.assert_allclose(A.flatten()[i+j], out.flatten()[row2+ col2+block_offset])
if orderOut == "col32":
out2, S = F.nvidia_transform(
out, from_order=orderOut, to_order="row", state=S
)
torch.testing.assert_allclose(A, out2)
===========changed ref 0===========
# module: tests.test_functional
@pytest.mark.parametrize("seq_dim, hidden_dim, batch_dim", values, ids=names)
def test_dim3_igemm(seq_dim, hidden_dim, batch_dim):
seq_dim = seq_dim - (seq_dim % 32)
hidden_dim = hidden_dim - (hidden_dim % 32)
batch_dim = batch_dim - (batch_dim % 2)
for i in range(25):
A = torch.randint(
-128, 127, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
).to(torch.int8)
B = torch.randint(
-128, 127, size=(batch_dim, seq_dim, 1024), device="cuda"
).to(torch.int8)
out2 = torch.einsum("bsi, bso->io", A.float(), B.float())
iout = torch.empty(
A.shape[2], B.shape[2], dtype=torch.int32, device=A.device
)
out = F.igemm(A, B, out=iout)
+ torch.testing.assert_close(out.float(), out2)
- torch.testing.assert_allclose(out.float(), out2)
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, transpose", values, ids=names)
def test_ibmm(dim1, dim2, dim3, dim4, transpose):
dim2 = dim2 - (dim2 % 16)
dim3 = dim3 - (dim3 % 16)
dim4 = dim4 - (dim4 % 16)
for i in range(k):
shapeA = (dim1, dim3, dim2) if transpose[0] else (dim1, dim2, dim3)
shapeB = (dim1, dim4, dim3) if transpose[1] else (dim1, dim3, dim4)
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
if not transpose[0] and not transpose[1]:
out2 = torch.bmm(A.float(), B.float())
out = F.igemm(A, B)
elif not transpose[0] and transpose[1]:
out2 = torch.bmm(A.float(), B.permute([0, 2, 1]).float())
out = F.igemm(A, B.permute([0, 2, 1]))
elif transpose[0] and not transpose[1]:
out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.float())
out = F.igemm(A.permute([0, 2, 1]), B)
elif transpose[0] and transpose[1]:
out2 = torch.bmm(
A.permute([0, 2, 1]).float(), B.permute([0, 2, 1]).float()
)
out = F.igemm(A.permute([0, 2, 1]), B.permute([0, 2, 1]))
+ torch.testing.assert_close</s>
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, transpose", values, ids=names)
def test_ibmm(dim1, dim2, dim3, dim4, transpose):
# offset: 1
<s>([0, 2, 1]), B.permute([0, 2, 1]))
+ torch.testing.assert_close(out.float(), out2.float())
- torch.testing.assert_allclose(out.float(), out2.float())
===========changed ref 3===========
<s>
- #print('rand', blocksize, sum(diffs)/len(diffs))
- #print('rand', blocksize, sum(reldiffs)/len(reldiffs))
-
-
-
- @pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
- @pytest.mark.skip("Stochastic has some bugs, but will be deprecated soon anyways.")
- def test_dynamic_blockwise_stochastic_quantization(blocksize):
- diffs = []
- reldiffs = []
- rand = torch.rand(1024).cuda()
- err = 0
- for i in range(100):
- A1 = torch.randn(1024, 1024, device="cuda")
- C1, S1 = F.quantize_blockwise(A1, rand=rand, blocksize=blocksize)
- C2, S2 = F.quantize_blockwise(A1, blocksize=blocksize)
- A2 = F.dequantize_blockwise(C1, S1, blocksize=blocksize)
- err += (A1-A2).abs().mean().item()/100
- # a maximum distance of quantized values of 1
- torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
- fraction_smaller = (C1 < C2).float().sum() / C1.numel()
- fraction_larger = (C1 > C2).float().sum() / C1.numel()
- torch.testing.assert_allclose(fraction_larger, fraction_smaller, atol=0.01, rtol=0)
- assert err < 0.019
-
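# Note: most hunks in the refs above migrate the deprecated torch.testing.assert_allclose
# to torch.testing.assert_close, adding explicit casts (.float(), .long(), .int()) where the
# compared tensors differ in dtype. Minimal sketch of why the casts are needed (tensor names
# here are illustrative only, not from the tests):
import torch

a = torch.zeros(4, dtype=torch.int64)
b = torch.zeros(4, dtype=torch.int32)

# assert_close checks dtype (and device) by default, so mixed dtypes need either
# an explicit cast, as in the hunks above ...
torch.testing.assert_close(a, b.long())
# ... or an explicit opt-out:
torch.testing.assert_close(a, b, check_dtype=False)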
|
tests.test_functional/test_igemmlt_int
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<18>:<add> torch.testing.assert_close(C1, C3.float())
<del> torch.testing.assert_allclose(C1, C3.float())
<29>:<add> torch.testing.assert_close(C1, C3.float())
<del>
|
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims, ldb", values, ids=names)
def test_igemmlt_int(dim1, dim2, dim3, dim4, dims, ldb):
<0> for i in range(k):
<1> if dims == 2:
<2> A = torch.randint(-128, 127, size=(dim1, dim3), device="cuda").to(
<3> torch.int8
<4> )
<5> elif dims == 3:
<6> A = torch.randint(
<7> -128, 127, size=(dim1, dim2, dim3), device="cuda"
<8> ).to(torch.int8)
<9> B = torch.randint(-128, 127, size=(dim4, dim3), device="cuda").to(
<10> torch.int8
<11> )
<12> C1 = torch.matmul(A.float(), B.t().float())
<13>
<14> A2, SA = F.transform(A, "col32")
<15> B2, SB = F.transform(B, "col_turing")
<16> C2, SC = F.igemmlt(A2, B2, SA, SB)
<17> C3, S = F.nvidia_transform(C2, "row", state=SC)
<18> torch.testing.assert_allclose(C1, C3.float())
<19>
<20> # transpose
<21> B = torch.randint(-128, 127, size=(dim3, dim4), device="cuda").to(
<22> torch.int8
<23> )
<24> C1 = torch.matmul(A.float(), B.float())
<25>
<26> B2t, SBt = F.transform(B, "col_turing", transpose=True)
<27> C2, SC = F.igemmlt(A2, B2t, SA, SBt)
<28> C3, S = F.nvidia_transform(C2, "row", state=SC)
<29> </s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims, ldb", values, ids=names)
def test_igemmlt_int(dim1, dim2, dim3, dim4, dims, ldb):
# offset: 1
===========changed ref 0===========
# module: tests.test_functional
@pytest.mark.parametrize("seq_dim, hidden_dim, batch_dim", values, ids=names)
def test_dim3_igemm(seq_dim, hidden_dim, batch_dim):
seq_dim = seq_dim - (seq_dim % 32)
hidden_dim = hidden_dim - (hidden_dim % 32)
batch_dim = batch_dim - (batch_dim % 2)
for i in range(25):
A = torch.randint(
-128, 127, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
).to(torch.int8)
B = torch.randint(
-128, 127, size=(batch_dim, seq_dim, 1024), device="cuda"
).to(torch.int8)
out2 = torch.einsum("bsi, bso->io", A.float(), B.float())
iout = torch.empty(
A.shape[2], B.shape[2], dtype=torch.int32, device=A.device
)
out = F.igemm(A, B, out=iout)
+ torch.testing.assert_close(out.float(), out2)
- torch.testing.assert_allclose(out.float(), out2)
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, transpose", values, ids=names)
def test_ibmm(dim1, dim2, dim3, dim4, transpose):
dim2 = dim2 - (dim2 % 16)
dim3 = dim3 - (dim3 % 16)
dim4 = dim4 - (dim4 % 16)
for i in range(k):
shapeA = (dim1, dim3, dim2) if transpose[0] else (dim1, dim2, dim3)
shapeB = (dim1, dim4, dim3) if transpose[1] else (dim1, dim3, dim4)
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
B = torch.randint(-128, 127, size=shapeB, device="cuda").to(torch.int8)
if not transpose[0] and not transpose[1]:
out2 = torch.bmm(A.float(), B.float())
out = F.igemm(A, B)
elif not transpose[0] and transpose[1]:
out2 = torch.bmm(A.float(), B.permute([0, 2, 1]).float())
out = F.igemm(A, B.permute([0, 2, 1]))
elif transpose[0] and not transpose[1]:
out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.float())
out = F.igemm(A.permute([0, 2, 1]), B)
elif transpose[0] and transpose[1]:
out2 = torch.bmm(
A.permute([0, 2, 1]).float(), B.permute([0, 2, 1]).float()
)
out = F.igemm(A.permute([0, 2, 1]), B.permute([0, 2, 1]))
+ torch.testing.assert_close</s>
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, transpose", values, ids=names)
def test_ibmm(dim1, dim2, dim3, dim4, transpose):
# offset: 1
<s>([0, 2, 1]), B.permute([0, 2, 1]))
+ torch.testing.assert_close(out.float(), out2.float())
- torch.testing.assert_allclose(out.float(), out2.float())
===========changed ref 3===========
<s>
- #print('rand', blocksize, sum(diffs)/len(diffs))
- #print('rand', blocksize, sum(reldiffs)/len(reldiffs))
-
-
-
- @pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
- @pytest.mark.skip("Stochastic has some bugs, but will be deprecated soon anyways.")
- def test_dynamic_blockwise_stochastic_quantization(blocksize):
- diffs = []
- reldiffs = []
- rand = torch.rand(1024).cuda()
- err = 0
- for i in range(100):
- A1 = torch.randn(1024, 1024, device="cuda")
- C1, S1 = F.quantize_blockwise(A1, rand=rand, blocksize=blocksize)
- C2, S2 = F.quantize_blockwise(A1, blocksize=blocksize)
- A2 = F.dequantize_blockwise(C1, S1, blocksize=blocksize)
- err += (A1-A2).abs().mean().item()/100
- # a maximum distance of quantized values of 1
- torch.testing.assert_allclose(C1, C2, atol=1, rtol=0)
- fraction_smaller = (C1 < C2).float().sum() / C1.numel()
- fraction_larger = (C1 > C2).float().sum() / C1.numel()
- torch.testing.assert_allclose(fraction_larger, fraction_smaller, atol=0.01, rtol=0)
- assert err < 0.019
-
===========changed ref 4===========
# module: tests.test_functional
+ def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
- def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
+ if throw:
+ print(f"Too many values not close: assert {sumval} < {count}")
- print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 5===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float16], ids=["float", "half"]
)
def test_estimate_quantiles(dtype):
A = torch.rand(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
percs = torch.linspace(1 / 512, 511 / 512, 256, device=A.device)
+ torch.testing.assert_close(percs, code, atol=1e-3, rtol=1e-2)
- torch.testing.assert_allclose(percs, code, atol=1e-3, rtol=1e-2)
A = torch.randn(1024, 1024, device="cuda")
A = A.to(dtype)
code = F.estimate_quantiles(A)
quantiles = torch.quantile(A.float(), percs)
diff = torch.abs(code - quantiles)
assert (diff > 5e-02).sum().item() == 0
|
tests.test_functional/test_colrow_absmax
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<31>:<add> torch.testing.assert_close(col_stats1_trunc, col_stats2)
<del> torch.testing.assert_allclose(col
|
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dims", values, ids=names)
def test_colrow_absmax(dim1, dim2, dims):
<0> for i in range(k):
<1> threshold = 3.0
<2> A = torch.randn(dim1, dim2, device="cuda").half()
<3> A_truncated = A.clone()
<4> A_truncated[torch.abs(A_truncated) >= 3.0] = 0.0
<5> if dims == 2:
<6> row_stats1, _ = torch.abs(A.float()).max(1)
<7> col_stats1, _ = torch.abs(A.float()).max(0)
<8> row_stats1_trunc, _ = torch.abs(A_truncated.float()).max(1)
<9> col_stats1_trunc, _ = torch.abs(A_truncated.float()).max(0)
<10> else:
<11> assert False
<12>
<13> row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
<14> A, threshold=threshold
<15> )
<16>
<17> A_blocked = einops.rearrange(
<18> torch.abs(A),
<19> "(rows row_tiles) (cols block_size)-> rows cols row_tiles block_size",
<20> row_tiles=16,
<21> block_size=64 * 4,
<22> )
<23> nnz_rows1_counts = (torch.abs(A_blocked) >= threshold).sum(3).flatten()
<24> nnz_block_ptr1 = torch.zeros(
<25> nnz_rows1_counts.shape[0] + 1,
<26> dtype=nnz_rows1_counts.dtype,
<27> device=nnz_rows1_counts.device,
<28> )
<29> nnz_block_ptr1[1:] = nnz_rows1_counts.cumsum(0)
<30>
<31> torch.testing.assert_allclose(col</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dims", values, ids=names)
def test_colrow_absmax(dim1, dim2, dims):
# offset: 1
torch.testing.assert_allclose(row_stats1_trunc, row_stats2)
torch.testing.assert_allclose(nnz_block_ptr1, nnz_block_ptr2)
row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
A, threshold=0.0
)
torch.testing.assert_allclose(col_stats1, col_stats2)
torch.testing.assert_allclose(row_stats1, row_stats2)
assert nnz_block_ptr2 is None
===========changed ref 0===========
# module: tests.test_functional
# print('')
# print(output.flatten()[:10])
# print(C1.flatten()[:10])
# print(C2.flatten()[:10])
+ # torch.testing.assert_close(C1.view(-1, C1.shape[-1]), output, atol=0.025, rtol=0.05)
- # torch.testing.assert_allclose(C1.view(-1, C1.shape[-1]), output, atol=0.025, rtol=0.05)
# transpose
# B = torch.randint(-128, 127, size=(dim3, dim4), device='cuda').to(torch.int8)
# C1 = torch.matmul(A.float(), B.float())
# B2t, SBt = F.transform2(B, 'col_turing', transpose=True)
# C2, SC = F.igemmlt(A2, B2t, SA, SBt)
# C3, S = F.transform(C2, 'row', state=SC)
+ # torch.testing.assert_close(C1, C3.float())
- # torch.testing.assert_allclose(C1, C3.float())
batch_size = 2
seqdim = 512
# values = [(batch_size, seqdim, 4*1024, 16*1024),(batch_size, seqdim, 5120, 4*5120),(batch_size, seqdim, 12*1024, 4*12*1024)]
values = [
(batch_size, seqdim, 4 * 1024, 3 * 4 * 1024),
(batch_size, seqdim, 5120, 3 * 5120),
(batch_size, seqdim, 12 * 1024, 4 * 12 * 1024),
]
# values = list(product(batch, seq, model, hidden))
names = [
"batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values
]
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims, ldb", values, ids=names)
def test_igemmlt_int(dim1, dim2, dim3, dim4, dims, ldb):
for i in range(k):
if dims == 2:
A = torch.randint(-128, 127, size=(dim1, dim3), device="cuda").to(
torch.int8
)
elif dims == 3:
A = torch.randint(
-128, 127, size=(dim1, dim2, dim3), device="cuda"
).to(torch.int8)
B = torch.randint(-128, 127, size=(dim4, dim3), device="cuda").to(
torch.int8
)
C1 = torch.matmul(A.float(), B.t().float())
A2, SA = F.transform(A, "col32")
B2, SB = F.transform(B, "col_turing")
C2, SC = F.igemmlt(A2, B2, SA, SB)
C3, S = F.nvidia_transform(C2, "row", state=SC)
+ torch.testing.assert_close(C1, C3.float())
- torch.testing.assert_allclose(C1, C3.float())
# transpose
B = torch.randint(-128, 127, size=(dim3, dim4), device="cuda").to(
torch.int8
)
C1 = torch.matmul(A.float(), B.float())
B2t, SBt = F.transform(B, "col_turing", transpose=True)
C2, SC = F.igemmlt(A2, B2t, SA, SBt)
C3, S = F.nvidia_transform(C2, "row", state=SC)
+ torch.testing.assert_close</s>
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims, ldb", values, ids=names)
def test_igemmlt_int(dim1, dim2, dim3, dim4, dims, ldb):
# offset: 1
<s> = F.nvidia_transform(C2, "row", state=SC)
+ torch.testing.assert_close(C1, C3.float())
- torch.testing.assert_allclose(C1, C3.float())
===========changed ref 3===========
# module: tests.test_functional
@pytest.mark.parametrize("seq_dim, hidden_dim, batch_dim", values, ids=names)
def test_dim3_igemm(seq_dim, hidden_dim, batch_dim):
seq_dim = seq_dim - (seq_dim % 32)
hidden_dim = hidden_dim - (hidden_dim % 32)
batch_dim = batch_dim - (batch_dim % 2)
for i in range(25):
A = torch.randint(
-128, 127, size=(batch_dim, seq_dim, hidden_dim), device="cuda"
).to(torch.int8)
B = torch.randint(
-128, 127, size=(batch_dim, seq_dim, 1024), device="cuda"
).to(torch.int8)
out2 = torch.einsum("bsi, bso->io", A.float(), B.float())
iout = torch.empty(
A.shape[2], B.shape[2], dtype=torch.int32, device=A.device
)
out = F.igemm(A, B, out=iout)
+ torch.testing.assert_close(out.float(), out2)
- torch.testing.assert_allclose(out.float(), out2)
|
tests.test_functional/test_double_quant
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<8>:<add> torch.testing.assert_close(CA, out_row1, atol=1, rtol=0)
<del> torch.testing.assert_allclose(CA, out_row1, atol=1, rtol=0)
<9>:<add> torch.testing.assert_close(CAt, out_col1, atol=1, rtol=0)
<del> torch.testing.assert_allclose(CAt, out_col1, atol=1, rtol=0)
<32>:<add> torch.testing.assert_close(Srow.flatten().float(), statsA)
<del> torch.testing.assert_allclose
|
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
<0> for i in range(k):
<1> A = torch.randn(dim1, dim2, device="cuda").half()
<2> out_col1, Scol = F.vectorwise_quant(A, dim=0)
<3> out_row1, Srow = F.vectorwise_quant(A, dim=1)
<4>
<5> CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
<6>
<7> # max difference is 1 due to rounding differences
<8> torch.testing.assert_allclose(CA, out_row1, atol=1, rtol=0)
<9> torch.testing.assert_allclose(CAt, out_col1, atol=1, rtol=0)
<10>
<11> n = CAt.numel()
<12> num_not_close_rows = (
<13> (torch.isclose(CA, out_row1, atol=1) == 0).sum().item()
<14> )
<15> num_not_close_cols = (
<16> (torch.isclose(CAt, out_col1, atol=1) == 0).sum().item()
<17> )
<18>
<19> # allow for 1:500 error due to rounding differences
<20> min_error = 1 / 500
<21> if num_not_close_cols > (min_error * n):
<22> print(
<23> f"Min error exceeded {num_not_close_cols} elements are different. Error: {num_not_close_cols/n:.4f}"
<24> )
<25> assert False
<26> if num_not_close_rows > (min_error * n):
<27> print(
<28> f"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_rows/n:.4f}"
<29> )
<30> assert False
<31>
<32> torch.testing.assert_allclose</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
# offset: 1
torch.testing.assert_allclose(Scol.flatten(), statsAt)
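# For reference, a rough sketch of the row-wise half of what double_quant computes
# (simplified: it ignores the column-wise pass and outlier extraction; names are illustrative):
import torch

def rowwise_int8_quant(A):
    stats = A.float().abs().amax(dim=1)                       # per-row absmax, like statsA above
    C = torch.round(A.float() * 127.0 / stats.unsqueeze(1)).to(torch.int8)
    return C, stats

A = torch.randn(8, 16).half()
CA, statsA = rowwise_int8_quant(A)
# dequantization used elsewhere in this file: scale back by absmax / 127
A_restored = (CA.float() * statsA.unsqueeze(1) / 127).half()
assert (A - A_restored).abs().max() < 0.03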
===========changed ref 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dims", values, ids=names)
def test_colrow_absmax(dim1, dim2, dims):
for i in range(k):
threshold = 3.0
A = torch.randn(dim1, dim2, device="cuda").half()
A_truncated = A.clone()
A_truncated[torch.abs(A_truncated) >= 3.0] = 0.0
if dims == 2:
row_stats1, _ = torch.abs(A.float()).max(1)
col_stats1, _ = torch.abs(A.float()).max(0)
row_stats1_trunc, _ = torch.abs(A_truncated.float()).max(1)
col_stats1_trunc, _ = torch.abs(A_truncated.float()).max(0)
else:
assert False
row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
A, threshold=threshold
)
A_blocked = einops.rearrange(
torch.abs(A),
"(rows row_tiles) (cols block_size)-> rows cols row_tiles block_size",
row_tiles=16,
block_size=64 * 4,
)
nnz_rows1_counts = (torch.abs(A_blocked) >= threshold).sum(3).flatten()
nnz_block_ptr1 = torch.zeros(
nnz_rows1_counts.shape[0] + 1,
dtype=nnz_rows1_counts.dtype,
device=nnz_rows1_counts.device,
)
nnz_block_ptr1[1:] = nnz_rows1_counts.cumsum(0)
+ torch.testing.assert_close(col_stats1_trunc, col_stats2)
- torch.testing.assert_allclose(col_stats1_trunc, col</s>
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dims", values, ids=names)
def test_colrow_absmax(dim1, dim2, dims):
# offset: 1
<s>1_trunc, col_stats2)
- torch.testing.assert_allclose(col_stats1_trunc, col_stats2)
+ torch.testing.assert_close(row_stats1_trunc, row_stats2)
- torch.testing.assert_allclose(row_stats1_trunc, row_stats2)
+ torch.testing.assert_close(nnz_block_ptr1.int(), nnz_block_ptr2)
- torch.testing.assert_allclose(nnz_block_ptr1, nnz_block_ptr2)
row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
A, threshold=0.0
)
+ torch.testing.assert_close(col_stats1, col_stats2)
- torch.testing.assert_allclose(col_stats1, col_stats2)
+ torch.testing.assert_close(row_stats1, row_stats2)
- torch.testing.assert_allclose(row_stats1, row_stats2)
assert nnz_block_ptr2 is None
===========changed ref 2===========
# module: tests.test_functional
# print('')
# print(output.flatten()[:10])
# print(C1.flatten()[:10])
# print(C2.flatten()[:10])
+ # torch.testing.assert_close(C1.view(-1, C1.shape[-1]), output, atol=0.025, rtol=0.05)
- # torch.testing.assert_allclose(C1.view(-1, C1.shape[-1]), output, atol=0.025, rtol=0.05)
# transpose
# B = torch.randint(-128, 127, size=(dim3, dim4), device='cuda').to(torch.int8)
# C1 = torch.matmul(A.float(), B.float())
# B2t, SBt = F.transform2(B, 'col_turing', transpose=True)
# C2, SC = F.igemmlt(A2, B2t, SA, SBt)
# C3, S = F.transform(C2, 'row', state=SC)
+ # torch.testing.assert_close(C1, C3.float())
- # torch.testing.assert_allclose(C1, C3.float())
batch_size = 2
seqdim = 512
# values = [(batch_size, seqdim, 4*1024, 16*1024),(batch_size, seqdim, 5120, 4*5120),(batch_size, seqdim, 12*1024, 4*12*1024)]
values = [
(batch_size, seqdim, 4 * 1024, 3 * 4 * 1024),
(batch_size, seqdim, 5120, 3 * 5120),
(batch_size, seqdim, 12 * 1024, 4 * 12 * 1024),
]
# values = list(product(batch, seq, model, hidden))
names = [
"batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values
]
===========changed ref 3===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dim4, dims, ldb", values, ids=names)
def test_igemmlt_int(dim1, dim2, dim3, dim4, dims, ldb):
for i in range(k):
if dims == 2:
A = torch.randint(-128, 127, size=(dim1, dim3), device="cuda").to(
torch.int8
)
elif dims == 3:
A = torch.randint(
-128, 127, size=(dim1, dim2, dim3), device="cuda"
).to(torch.int8)
B = torch.randint(-128, 127, size=(dim4, dim3), device="cuda").to(
torch.int8
)
C1 = torch.matmul(A.float(), B.t().float())
A2, SA = F.transform(A, "col32")
B2, SB = F.transform(B, "col_turing")
C2, SC = F.igemmlt(A2, B2, SA, SB)
C3, S = F.nvidia_transform(C2, "row", state=SC)
+ torch.testing.assert_close(C1, C3.float())
- torch.testing.assert_allclose(C1, C3.float())
# transpose
B = torch.randint(-128, 127, size=(dim3, dim4), device="cuda").to(
torch.int8
)
C1 = torch.matmul(A.float(), B.float())
B2t, SBt = F.transform(B, "col_turing", transpose=True)
C2, SC = F.igemmlt(A2, B2t, SA, SBt)
C3, S = F.nvidia_transform(C2, "row", state=SC)
+ torch.testing.assert_close</s>
|
tests.test_functional/test_integrated_igemmlt
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<11>:<add> torch.testing.assert_close(maxA.flatten().float(), stats1a)
<del> torch.testing.assert_allclose(maxA.flatten(), stats1a)
<12>:<add> torch.testing.assert_close(maxB.flatten().float(), stats2a)
<del> torch.testing.assert_allclose(maxB.flatten(), stats2a)
<13>:<add> torch.testing.assert_close(C1a, A1, rtol=0, atol=1)
<del> torch.testing.assert_allclose(C1a, A1, rtol=0, atol=1)
<14>:<add> torch.testing.assert_close(C2a, B1, rtol=0, atol=1)
<del> torch.testing.assert_allclose(C2a, B1, rtol=0, atol=1)
|
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
def test_integrated_igemmlt(dim1, dim4, inner):
<0> for i in range(k):
<1> A = torch.randn(dim1, inner, device="cuda").half()
<2> B = torch.randn(dim4, inner, device="cuda").half()
<3>
<4> out1 = torch.matmul(A.half(), B.t().half())
<5>
<6> C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
<7> C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
<8> A1, maxA = F.vectorwise_quant(A, dim=1)
<9> B1, maxB = F.vectorwise_quant(B, dim=1)
<10>
<11> torch.testing.assert_allclose(maxA.flatten(), stats1a)
<12> torch.testing.assert_allclose(maxB.flatten(), stats2a)
<13> torch.testing.assert_allclose(C1a, A1, rtol=0, atol=1)
<14> torch.testing.assert_allclose(C2a, B1, rtol=0, atol=1)
<15>
<16> A2, SA = F.nvidia_transform(C1a, "col32")
<17> B2, SB = F.nvidia_transform(C2a, "col_turing")
<18> outC32, SC = F.igemmlt(A2, B2, SA, SB)
<19> out2 = F.mm_dequant(outC32, SC, stats1a, stats2a)
<20>
<21> A2, SA = F.nvidia_transform(A1, "col32")
<22> B2, SB = F.nvidia_transform(B1, "col_turing")
<23> C2, SC =</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
def test_integrated_igemmlt(dim1, dim4, inner):
# offset: 1
C3, S = F.nvidia_transform(C2, "row", state=SC)
out3 = F.vectorwise_mm_dequant(C3.float(), maxA, maxB.t())
err1 = torch.abs(out1 - out2).mean().item()
err2 = torch.abs(out1 - out3).mean().item()
assert err2 <= err1 * 1.025
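# Rough, pure-PyTorch sketch of the rescaling that mm_dequant / vectorwise_mm_dequant apply to
# the igemm output above. The real kernel accumulates in int32 on the GPU; this only illustrates
# the math, with small illustrative shapes:
import torch

A = torch.randn(4, 8)
B = torch.randn(6, 8)
maxA = A.abs().amax(dim=1, keepdim=True)          # per-row scale of A (statsA)
maxB = B.abs().amax(dim=1, keepdim=True)          # per-row scale of B (statsB)
CA = torch.round(A * 127 / maxA)                  # int8 codes of A (kept as float here)
CB = torch.round(B * 127 / maxB)

C32 = CA @ CB.t()                                 # stands in for the int32 accumulator
out = C32 * maxA * maxB.t() / (127 * 127)         # ~= A @ B.t()
assert (out - A @ B.t()).abs().mean() < 0.05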
===========changed ref 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
for i in range(k):
A = torch.randn(dim1, dim2, device="cuda").half()
out_col1, Scol = F.vectorwise_quant(A, dim=0)
out_row1, Srow = F.vectorwise_quant(A, dim=1)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
# max difference is 1 due to rounding differences
+ torch.testing.assert_close(CA, out_row1, atol=1, rtol=0)
- torch.testing.assert_allclose(CA, out_row1, atol=1, rtol=0)
+ torch.testing.assert_close(CAt, out_col1, atol=1, rtol=0)
- torch.testing.assert_allclose(CAt, out_col1, atol=1, rtol=0)
n = CAt.numel()
num_not_close_rows = (
(torch.isclose(CA, out_row1, atol=1) == 0).sum().item()
)
num_not_close_cols = (
(torch.isclose(CAt, out_col1, atol=1) == 0).sum().item()
)
# allow for 1:500 error due to rounding differences
min_error = 1 / 500
if num_not_close_cols > (min_error * n):
print(
f"Min error exceeded {num_not_close_cols} elements are different. Error: {num_not_close_cols/n:.4f}"
)
assert False
if num_not_close_rows > (min_error * n):
print(
f"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_</s>
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
# offset: 1
<s>(
f"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_rows/n:.4f}"
)
assert False
+ torch.testing.assert_close(Srow.flatten().float(), statsA)
- torch.testing.assert_allclose(Srow.flatten(), statsA)
+ torch.testing.assert_close(Scol.flatten().float(), statsAt)
- torch.testing.assert_allclose(Scol.flatten(), statsAt)
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dims", values, ids=names)
def test_colrow_absmax(dim1, dim2, dims):
for i in range(k):
threshold = 3.0
A = torch.randn(dim1, dim2, device="cuda").half()
A_truncated = A.clone()
A_truncated[torch.abs(A_truncated) >= 3.0] = 0.0
if dims == 2:
row_stats1, _ = torch.abs(A.float()).max(1)
col_stats1, _ = torch.abs(A.float()).max(0)
row_stats1_trunc, _ = torch.abs(A_truncated.float()).max(1)
col_stats1_trunc, _ = torch.abs(A_truncated.float()).max(0)
else:
assert False
row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
A, threshold=threshold
)
A_blocked = einops.rearrange(
torch.abs(A),
"(rows row_tiles) (cols block_size)-> rows cols row_tiles block_size",
row_tiles=16,
block_size=64 * 4,
)
nnz_rows1_counts = (torch.abs(A_blocked) >= threshold).sum(3).flatten()
nnz_block_ptr1 = torch.zeros(
nnz_rows1_counts.shape[0] + 1,
dtype=nnz_rows1_counts.dtype,
device=nnz_rows1_counts.device,
)
nnz_block_ptr1[1:] = nnz_rows1_counts.cumsum(0)
+ torch.testing.assert_close(col_stats1_trunc, col_stats2)
- torch.testing.assert_allclose(col_stats1_trunc, col</s>
===========changed ref 3===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dims", values, ids=names)
def test_colrow_absmax(dim1, dim2, dims):
# offset: 1
<s>1_trunc, col_stats2)
- torch.testing.assert_allclose(col_stats1_trunc, col_stats2)
+ torch.testing.assert_close(row_stats1_trunc, row_stats2)
- torch.testing.assert_allclose(row_stats1_trunc, row_stats2)
+ torch.testing.assert_close(nnz_block_ptr1.int(), nnz_block_ptr2)
- torch.testing.assert_allclose(nnz_block_ptr1, nnz_block_ptr2)
row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(
A, threshold=0.0
)
+ torch.testing.assert_close(col_stats1, col_stats2)
- torch.testing.assert_allclose(col_stats1, col_stats2)
+ torch.testing.assert_close(row_stats1, row_stats2)
- torch.testing.assert_allclose(row_stats1, row_stats2)
assert nnz_block_ptr2 is None
|
tests.test_functional/test_transform
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<23>:<add> torch.testing.assert_close(out1, out2)
<del> torch.testing.assert_allclose(out1, out2)
|
# module: tests.test_functional
@pytest.mark.parametrize(
"dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",
values,
ids=names,
)
def test_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
<0> for i in range(k):
<1> if dims == 2:
<2> A = torch.randint(10, 99, size=(dim1, dim2), device="cuda").to(
<3> dtype
<4> )
<5> elif dims == 3:
<6> A = torch.randint(
<7> 10, 99, size=(dim1, dim2, dim3), device="cuda"
<8> ).to(dtype)
<9>
<10> A.view(-1)[-1] = -1
<11> if transpose:
<12> At = A.t().contiguous()
<13> out1, S1 = F.nvidia_transform(At, to_order=orderOut)
<14> else:
<15> out1, S1 = F.nvidia_transform(A, to_order=orderOut)
<16> out2, S2 = F.transform(A, to_order=orderOut, transpose=transpose)
<17>
<18> assert S1[0][0] == S2[0][0]
<19> assert S1[0][1] == S2[0][1]
<20> # print(out1)
<21> # print(out2)
<22>
<23> torch.testing.assert_allclose(out1, out2)
<24>
|
===========changed ref 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
for i in range(k):
A = torch.randn(dim1, dim2, device="cuda").half()
out_col1, Scol = F.vectorwise_quant(A, dim=0)
out_row1, Srow = F.vectorwise_quant(A, dim=1)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
# max difference is 1 due to rounding differences
+ torch.testing.assert_close(CA, out_row1, atol=1, rtol=0)
- torch.testing.assert_allclose(CA, out_row1, atol=1, rtol=0)
+ torch.testing.assert_close(CAt, out_col1, atol=1, rtol=0)
- torch.testing.assert_allclose(CAt, out_col1, atol=1, rtol=0)
n = CAt.numel()
num_not_close_rows = (
(torch.isclose(CA, out_row1, atol=1) == 0).sum().item()
)
num_not_close_cols = (
(torch.isclose(CAt, out_col1, atol=1) == 0).sum().item()
)
# allow for 1:500 error due to rounding differences
min_error = 1 / 500
if num_not_close_cols > (min_error * n):
print(
f"Min error exceeded {num_not_close_cols} elements are different. Error: {num_not_close_cols/n:.4f}"
)
assert False
if num_not_close_rows > (min_error * n):
print(
f"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_</s>
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
# offset: 1
<s>(
f"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_rows/n:.4f}"
)
assert False
+ torch.testing.assert_close(Srow.flatten().float(), statsA)
- torch.testing.assert_allclose(Srow.flatten(), statsA)
+ torch.testing.assert_close(Scol.flatten().float(), statsAt)
- torch.testing.assert_allclose(Scol.flatten(), statsAt)
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
def test_integrated_igemmlt(dim1, dim4, inner):
for i in range(k):
A = torch.randn(dim1, inner, device="cuda").half()
B = torch.randn(dim4, inner, device="cuda").half()
out1 = torch.matmul(A.half(), B.t().half())
C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
A1, maxA = F.vectorwise_quant(A, dim=1)
B1, maxB = F.vectorwise_quant(B, dim=1)
+ torch.testing.assert_close(maxA.flatten().float(), stats1a)
- torch.testing.assert_allclose(maxA.flatten(), stats1a)
+ torch.testing.assert_close(maxB.flatten().float(), stats2a)
- torch.testing.assert_allclose(maxB.flatten(), stats2a)
+ torch.testing.assert_close(C1a, A1, rtol=0, atol=1)
- torch.testing.assert_allclose(C1a, A1, rtol=0, atol=1)
+ torch.testing.assert_close(C2a, B1, rtol=0, atol=1)
- torch.testing.assert_allclose(C2a, B1, rtol=0, atol=1)
A2, SA = F.nvidia_transform(C1a, "col32")
B2, SB = F.nvidia_transform(C2a, "col_turing")
outC32, SC = F.igemmlt(A2, B2, SA, SB</s>
===========changed ref 3===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
def test_integrated_igemmlt(dim1, dim4, inner):
# offset: 1
<s>col_turing")
outC32, SC = F.igemmlt(A2, B2, SA, SB)
out2 = F.mm_dequant(outC32, SC, stats1a, stats2a)
A2, SA = F.nvidia_transform(A1, "col32")
B2, SB = F.nvidia_transform(B1, "col_turing")
C2, SC = F.igemmlt(A2, B2, SA, SB)
C3, S = F.nvidia_transform(C2, "row", state=SC)
out3 = F.vectorwise_mm_dequant(C3.float(), maxA, maxB.t())
err1 = torch.abs(out1 - out2).mean().item()
err2 = torch.abs(out1 - out3).mean().item()
assert err2 <= err1 * 1.025
|
tests.test_functional/test_coo_double_quant
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<16>:<add> torch.testing.assert_close(A1, A2)
<del> torch.testing.assert_allclose(A1, A2)
<20>:<add> torch.testing.assert_close(
<del> torch.testing.assert_allclose(
|
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_coo_double_quant(dim1, dim2):
<0> threshold = 3.00
<1> for i in range(k):
<2> A = torch.randn(dim1, dim2, device="cuda").half()
<3>
<4> idx = torch.abs(A) >= threshold
<5> CA2, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
<6> CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(
<7> A, threshold=threshold
<8> )
<9>
<10> if coo_tensor is not None:
<11> A1 = A * idx
<12> A2 = torch.zeros_like(A)
<13> A2[
<14> coo_tensor.rowidx.long(), coo_tensor.colidx.long()
<15> ] = coo_tensor.values
<16> torch.testing.assert_allclose(A1, A2)
<17>
<18> A1 = A * (idx == 0)
<19> A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()
<20> torch.testing.assert_allclose(
<21> A * (idx == 0), A2, rtol=0.05, atol=1.5e-2
<22> )
<23>
|
===========changed ref 0===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",
values,
ids=names,
)
def test_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
for i in range(k):
if dims == 2:
A = torch.randint(10, 99, size=(dim1, dim2), device="cuda").to(
dtype
)
elif dims == 3:
A = torch.randint(
10, 99, size=(dim1, dim2, dim3), device="cuda"
).to(dtype)
A.view(-1)[-1] = -1
if transpose:
At = A.t().contiguous()
out1, S1 = F.nvidia_transform(At, to_order=orderOut)
else:
out1, S1 = F.nvidia_transform(A, to_order=orderOut)
out2, S2 = F.transform(A, to_order=orderOut, transpose=transpose)
assert S1[0][0] == S2[0][0]
assert S1[0][1] == S2[0][1]
# print(out1)
# print(out2)
+ torch.testing.assert_close(out1, out2)
- torch.testing.assert_allclose(out1, out2)
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
for i in range(k):
A = torch.randn(dim1, dim2, device="cuda").half()
out_col1, Scol = F.vectorwise_quant(A, dim=0)
out_row1, Srow = F.vectorwise_quant(A, dim=1)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
# max difference is 1 due to rounding differences
+ torch.testing.assert_close(CA, out_row1, atol=1, rtol=0)
- torch.testing.assert_allclose(CA, out_row1, atol=1, rtol=0)
+ torch.testing.assert_close(CAt, out_col1, atol=1, rtol=0)
- torch.testing.assert_allclose(CAt, out_col1, atol=1, rtol=0)
n = CAt.numel()
num_not_close_rows = (
(torch.isclose(CA, out_row1, atol=1) == 0).sum().item()
)
num_not_close_cols = (
(torch.isclose(CAt, out_col1, atol=1) == 0).sum().item()
)
# allow for 1:500 error due to rounding differences
min_error = 1 / 500
if num_not_close_cols > (min_error * n):
print(
f"Min error exceeded {num_not_close_cols} elements are different. Error: {num_not_close_cols/n:.4f}"
)
assert False
if num_not_close_rows > (min_error * n):
print(
f"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_</s>
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_double_quant(dim1, dim2):
# offset: 1
<s>(
f"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_rows/n:.4f}"
)
assert False
+ torch.testing.assert_close(Srow.flatten().float(), statsA)
- torch.testing.assert_allclose(Srow.flatten(), statsA)
+ torch.testing.assert_close(Scol.flatten().float(), statsAt)
- torch.testing.assert_allclose(Scol.flatten(), statsAt)
===========changed ref 3===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
def test_integrated_igemmlt(dim1, dim4, inner):
for i in range(k):
A = torch.randn(dim1, inner, device="cuda").half()
B = torch.randn(dim4, inner, device="cuda").half()
out1 = torch.matmul(A.half(), B.t().half())
C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
A1, maxA = F.vectorwise_quant(A, dim=1)
B1, maxB = F.vectorwise_quant(B, dim=1)
+ torch.testing.assert_close(maxA.flatten().float(), stats1a)
- torch.testing.assert_allclose(maxA.flatten(), stats1a)
+ torch.testing.assert_close(maxB.flatten().float(), stats2a)
- torch.testing.assert_allclose(maxB.flatten(), stats2a)
+ torch.testing.assert_close(C1a, A1, rtol=0, atol=1)
- torch.testing.assert_allclose(C1a, A1, rtol=0, atol=1)
+ torch.testing.assert_close(C2a, B1, rtol=0, atol=1)
- torch.testing.assert_allclose(C2a, B1, rtol=0, atol=1)
A2, SA = F.nvidia_transform(C1a, "col32")
B2, SB = F.nvidia_transform(C2a, "col_turing")
outC32, SC = F.igemmlt(A2, B2, SA, SB</s>
===========changed ref 4===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
def test_integrated_igemmlt(dim1, dim4, inner):
# offset: 1
<s>col_turing")
outC32, SC = F.igemmlt(A2, B2, SA, SB)
out2 = F.mm_dequant(outC32, SC, stats1a, stats2a)
A2, SA = F.nvidia_transform(A1, "col32")
B2, SB = F.nvidia_transform(B1, "col_turing")
C2, SC = F.igemmlt(A2, B2, SA, SB)
C3, S = F.nvidia_transform(C2, "row", state=SC)
out3 = F.vectorwise_mm_dequant(C3.float(), maxA, maxB.t())
err1 = torch.abs(out1 - out2).mean().item()
err2 = torch.abs(out1 - out3).mean().item()
assert err2 <= err1 * 1.025
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
|
tests.test_functional/test_coo2csr
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<14>:<add> torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))
<del> torch.testing.assert_allclose(counts, (A2 != 0).sum(1))
<16>:<add> torch.testing.assert_close(A2[idx], csrA.values)
<del> torch.testing.assert_allclose(A2[idx], csrA.values)
|
<s> #out3 = F.spmm_coo(cooA, Bt.t())
# #out2 = F.spmm_coo(cooA, B)
# #out2 = F.spmm_coo_very_sparse(cooA, B)
# #out1 = torch.matmul(A, Bt.t())
# torch.cuda.synchronize()
# print(time.time() - t0)
def test_coo2csr():
<0> threshold = 1
<1> A = torch.randn(128, 128).half().cuda()
<2> idx = torch.abs(A) >= threshold
<3> nnz = (idx == 1).sum().item()
<4> rows, cols = torch.where(idx)
<5> values = A[idx]
<6> cooA = F.COOSparseTensor(
<7> A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
<8> )
<9> A2 = A * idx
<10> csrA = F.coo2csr(cooA)
<11> counts = csrA.rowptr[1:] - csrA.rowptr[:-1]
<12> assert counts.numel() == A.shape[0]
<13>
<14> torch.testing.assert_allclose(counts, (A2 != 0).sum(1))
<15> idx = A2 != 0
<16> torch.testing.assert_allclose(A2[idx], csrA.values)
<17>
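# The assertion above checks the CSR row pointer by differencing it back into per-row counts.
# Small sketch of how such a pointer is built from COO row indices (illustrative only, not the
# library kernel; the full conversion also reorders values by row):
import torch

def coo_rowidx_to_csr_rowptr(rowidx, n_rows):
    counts = torch.bincount(rowidx.long(), minlength=n_rows)   # nonzeros per row
    rowptr = torch.zeros(n_rows + 1, dtype=torch.long)
    rowptr[1:] = counts.cumsum(0)                               # exclusive prefix sum
    return rowptr

rowidx = torch.tensor([0, 0, 2, 3, 3, 3])
rowptr = coo_rowidx_to_csr_rowptr(rowidx, n_rows=4)
# consecutive differences recover the counts, which is what the test verifies
assert torch.equal(rowptr[1:] - rowptr[:-1], torch.bincount(rowidx, minlength=4))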
|
===========changed ref 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_coo_double_quant(dim1, dim2):
threshold = 3.00
for i in range(k):
A = torch.randn(dim1, dim2, device="cuda").half()
idx = torch.abs(A) >= threshold
CA2, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(
A, threshold=threshold
)
if coo_tensor is not None:
A1 = A * idx
A2 = torch.zeros_like(A)
A2[
coo_tensor.rowidx.long(), coo_tensor.colidx.long()
] = coo_tensor.values
+ torch.testing.assert_close(A1, A2)
- torch.testing.assert_allclose(A1, A2)
A1 = A * (idx == 0)
A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()
+ torch.testing.assert_close(
- torch.testing.assert_allclose(
A * (idx == 0), A2, rtol=0.05, atol=1.5e-2
)
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",
values,
ids=names,
)
def test_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
for i in range(k):
if dims == 2:
A = torch.randint(10, 99, size=(dim1, dim2), device="cuda").to(
dtype
)
elif dims == 3:
A = torch.randint(
10, 99, size=(dim1, dim2, dim3), device="cuda"
).to(dtype)
A.view(-1)[-1] = -1
if transpose:
At = A.t().contiguous()
out1, S1 = F.nvidia_transform(At, to_order=orderOut)
else:
out1, S1 = F.nvidia_transform(A, to_order=orderOut)
out2, S2 = F.transform(A, to_order=orderOut, transpose=transpose)
assert S1[0][0] == S2[0][0]
assert S1[0][1] == S2[0][1]
# print(out1)
# print(out2)
+ torch.testing.assert_close(out1, out2)
- torch.testing.assert_allclose(out1, out2)
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True):
+     elementwise_func('fill', A, None, value)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None):
+     elementwise_func('_mul', A, B, 0)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def arange(A, device=None):
+     elementwise_func('arange', A, None, 0)
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 12===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 13===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 14===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 15===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 16===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 17===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
|
tests.test_functional/test_coo2csc
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<14>:<add> torch.testing.assert_close(counts.long(), (A2 != 0).sum(0))
<del> torch.testing.assert_allclose(counts, (A2 != 0).sum(0))
<17>:<add> torch.testing.assert_close(A2.t()[idx], cscA.values)
<del> torch.testing.assert_allclose(A2.t()[idx], cscA.values)
|
# module: tests.test_functional
def test_coo2csc():
<0> threshold = 1
<1> A = torch.randn(128, 128).half().cuda()
<2> idx = torch.abs(A) >= threshold
<3> nnz = (idx == 1).sum().item()
<4> rows, cols = torch.where(idx)
<5> values = A[idx]
<6> cooA = F.COOSparseTensor(
<7> A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
<8> )
<9> A2 = A * idx
<10> cscA = F.coo2csc(cooA)
<11> counts = cscA.colptr[1:] - cscA.colptr[:-1]
<12> assert counts.numel() == A.shape[1]
<13>
<14> torch.testing.assert_allclose(counts, (A2 != 0).sum(0))
<15> # torch uses row-major -> use transpose to transfer to col-major
<16> idx = A2.t() != 0
<17> torch.testing.assert_allclose(A2.t()[idx], cscA.values)
<18>
|
===========changed ref 0===========
<s> #out3 = F.spmm_coo(cooA, Bt.t())
# #out2 = F.spmm_coo(cooA, B)
# #out2 = F.spmm_coo_very_sparse(cooA, B)
# #out1 = torch.matmul(A, Bt.t())
# torch.cuda.synchronize()
# print(time.time() - t0)
def test_coo2csr():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
csrA = F.coo2csr(cooA)
counts = csrA.rowptr[1:] - csrA.rowptr[:-1]
assert counts.numel() == A.shape[0]
+ torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))
- torch.testing.assert_allclose(counts, (A2 != 0).sum(1))
idx = A2 != 0
+ torch.testing.assert_close(A2[idx], csrA.values)
- torch.testing.assert_allclose(A2[idx], csrA.values)
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_coo_double_quant(dim1, dim2):
threshold = 3.00
for i in range(k):
A = torch.randn(dim1, dim2, device="cuda").half()
idx = torch.abs(A) >= threshold
CA2, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(
A, threshold=threshold
)
if coo_tensor is not None:
A1 = A * idx
A2 = torch.zeros_like(A)
A2[
coo_tensor.rowidx.long(), coo_tensor.colidx.long()
] = coo_tensor.values
+ torch.testing.assert_close(A1, A2)
- torch.testing.assert_allclose(A1, A2)
A1 = A * (idx == 0)
A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()
+ torch.testing.assert_close(
- torch.testing.assert_allclose(
A * (idx == 0), A2, rtol=0.05, atol=1.5e-2
)
===========changed ref 2===========
# module: tests.test_functional
@pytest.mark.parametrize(
"dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",
values,
ids=names,
)
def test_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
for i in range(k):
if dims == 2:
A = torch.randint(10, 99, size=(dim1, dim2), device="cuda").to(
dtype
)
elif dims == 3:
A = torch.randint(
10, 99, size=(dim1, dim2, dim3), device="cuda"
).to(dtype)
A.view(-1)[-1] = -1
if transpose:
At = A.t().contiguous()
out1, S1 = F.nvidia_transform(At, to_order=orderOut)
else:
out1, S1 = F.nvidia_transform(A, to_order=orderOut)
out2, S2 = F.transform(A, to_order=orderOut, transpose=transpose)
assert S1[0][0] == S2[0][0]
assert S1[0][1] == S2[0][1]
# print(out1)
# print(out2)
+ torch.testing.assert_close(out1, out2)
- torch.testing.assert_allclose(out1, out2)
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True):
+     elementwise_func('fill', A, None, value)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None):
+     elementwise_func('_mul', A, B, 0)
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def arange(A, device=None):
+     elementwise_func('arange', A, None, 0)
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 12===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 13===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 14===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 15===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
|
tests.test_functional/test_spmm_coo_dequant
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dtype", values, ids=names)
def test_spmm_coo_dequant(dim1, dim2, dtype):
<0> threshold = 6.0
<1> # threshold = 2.8
<2> # threshold = 0.0
<3> A = torch.randn(dim1, dim2, device="cuda").half()
<4> B = torch.empty(dim2, dim2 * 4, device="cuda", dtype=torch.float16)
<5> torch.nn.init.xavier_uniform_(B)
<6> Bt = B.t().contiguous()
<7>
<8> CB, CBt, statsB, statsBt, coo_tensor = F.double_quant(B)
<9>
<10> rowidx = torch.randint(0, A.shape[-1], size=(15,))
<11>
<12> A[:, rowidx] = 8.0
<13>
<14> idx = torch.abs(A) >= threshold
<15> nnz = (idx == 1).sum().item()
<16> rows, cols = torch.where(idx)
<17> values = A[idx]
<18> cooA = F.COOSparseTensor(
<19> A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
<20> )
<21> A2 = A * idx
<22> out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
<23> out1 = torch.matmul(A2, B.half())
<24> out3 = F.spmm_coo_very_sparse(cooA, CBt.half())
<25> out3 = out3 * statsBt.half() / 127
<26>
<27> values, counts = torch.unique(cooA.rowidx, return_counts=True)
<28> offset = counts.cumsum(0).int()
<29> max_count, max_idx = torch.sort(counts, descending=True)
<30> print(torch.median(max_count</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dtype", values, ids=names)
def test_spmm_coo_dequant(dim1, dim2, dtype):
# offset: 1
torch.testing.assert_allclose(out2, out3, rtol=0.05, atol=0.001)
p = 200 / (2048 * 12288 * 4)
n = out1.numel()
count = math.ceil(p * n)
assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=count)
# torch.cuda.synchronize()
# t0 = time.time()
# for i in range(100):
# out2 = F.spmm_coo_very_sparse(cooA, B)
# torch.cuda.synchronize()
# print('fp16', time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = F.spmm_coo(cooA, B)
torch.cuda.synchronize()
print("cusparse fp16", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = F.spmm_coo_very_sparse(cooA, CBt)
torch.cuda.synchronize()
print("int8", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
torch.cuda.synchronize()
print("int8+dequant", time.time() - t0)
torch.cuda.</s>
===========below chunk 1===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dtype", values, ids=names)
def test_spmm_coo_dequant(dim1, dim2, dtype):
# offset: 2
<s>synchronize()
print("int8+dequant", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out2 = torch.matmul(A, B)
torch.cuda.synchronize()
print("matmul", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out1 = bnb.matmul(A, Bt)
out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)
out = out1 + out2
torch.cuda.synchronize()
print("sparse+ matmul", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out1 = bnb.matmul(A, Bt)
torch.matmul(A[:, rowidx], Bt.t()[rowidx], out=out1)
torch.cuda.synchronize()
print("partial matmul", time.time() - t0)
torch.cuda.synchronize()
t0 = time.time()
for i in range(100):
out1 = bnb.matmul(A, Bt)
torch.cuda.synchronize()
print("partial matmul", time.time() - t0)
===========changed ref 0===========
<s> #out3 = F.spmm_coo(cooA, Bt.t())
# #out2 = F.spmm_coo(cooA, B)
# #out2 = F.spmm_coo_very_sparse(cooA, B)
# #out1 = torch.matmul(A, Bt.t())
# torch.cuda.synchronize()
# print(time.time() - t0)
def test_coo2csr():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
csrA = F.coo2csr(cooA)
counts = csrA.rowptr[1:] - csrA.rowptr[:-1]
assert counts.numel() == A.shape[0]
+ torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))
- torch.testing.assert_allclose(counts, (A2 != 0).sum(1))
idx = A2 != 0
+ torch.testing.assert_close(A2[idx], csrA.values)
- torch.testing.assert_allclose(A2[idx], csrA.values)
===========changed ref 1===========
# module: tests.test_functional
def test_coo2csc():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
cscA = F.coo2csc(cooA)
counts = cscA.colptr[1:] - cscA.colptr[:-1]
assert counts.numel() == A.shape[1]
+ torch.testing.assert_close(counts.long(), (A2 != 0).sum(0))
- torch.testing.assert_allclose(counts, (A2 != 0).sum(0))
# torch uses row-major -> use transpose to transfer to col-major
idx = A2.t() != 0
+ torch.testing.assert_close(A2.t()[idx], cscA.values)
- torch.testing.assert_allclose(A2.t()[idx], cscA.values)
|
|
tests.test_functional/test_bench_matmul
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<add> iters = 1
<del> iters = 128
<6>:<add>
<add> B_fp4, state = F.quantize_fp4(B)
<add> B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)
<add>
<add> B_nf4, state_nf4= F.quantize_nf4(B)
<13>:<del> linearMixedBit = (
<14>:<add> linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half())
<del> bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
<15>:<del> )
<17>:<add>
<add> linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
<add> linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
<29>:<del> print(
<30>:<add> print( f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]
|
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
<0> iters = 128
<1> formatB = F.get_special_format_str()
<2>
<3> A = torch.randn(batch, seq, model, device="cuda").half()
<4> B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
<5> torch.nn.init.xavier_uniform_(B)
<6>
<7> linear8bit = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
<8> linear8bit.eval()
<9>
<10> outliers = torch.randint(0, model, size=(5,)).cuda()
<11> A[:, :, outliers] = 8.0
<12>
<13> linearMixedBit = (
<14> bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
<15> )
<16> linearMixedBit.eval()
<17>
<18> # warmup
<19> for i in range(iters):
<20> torch.matmul(A, B.t())
<21> torch.cuda.synchronize()
<22> print("")
<23>
<24> torch.cuda.synchronize()
<25> t0 = time.time()
<26> for i in range(iters):
<27> torch.matmul(A, B.t())
<28> torch.cuda.synchronize()
<29> print(
<30> f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s"
<31> )
<32>
<33> torch.cuda.synchronize()
<34> t0 = time.time()
<35> for i in range(iters):
<36> bnb.matmul(A, B)
<37> torch.cuda.synchronize()
<38> print(f</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 1
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul(A, B, threshold=6.0)
torch.cuda.synchronize()
print(f"CB -> CxB conversion + threshold: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A, threshold=0.0)
C32A, SA = F.transform(CA, "col32")
CB, CBt, SCB, SCBt, coo_tensorB = F.double_quant(B)
CxB, SB = F.transform(CB, to_order=formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
torch.cuda.synchronize()
print(f"no overhead matmul-lt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
BA, statsB = F.vectorwise_quant(B, dim=1)
CxB, SB = F.nvidia_transform(CB, to_order=formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
A2 = A.view(-1, A.shape[-1]).contiguous()
CA, statsA = F.vectorwise_quant(A2, dim=1)
</s>
===========below chunk 1===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 2
<s>.shape[-1]).contiguous()
CA, statsA = F.vectorwise_quant(A2, dim=1)
C32A, SA = F.nvidia_transform(CA, "col32")
out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
Cout, Sout = F.nvidia_transform(out32, "row", state=Sout32)
F.vectorwise_mm_dequant(Cout, statsA, statsB.t())
torch.cuda.synchronize()
#print(f"vector pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
BA, statsB = F.vectorwise_quant(B, dim=1, quant_type="linear")
CxB, SB = F.nvidia_transform(CB, to_order=formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
A2 = A.view(-1, A.shape[-1]).contiguous()
CA, statsA = F.vectorwise_quant(A2, dim=1, quant_type="linear")
C32A, SA = F.nvidia_transform(CA, "col32")
out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
Cout, Sout = F.nvidia_transform(out32, "row", state=Sout32)
out = Cout * statsB * statsA * (1.</s>
===========below chunk 2===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 3
<s> (127 * 127))
torch.cuda.synchronize()
#print(f"linear pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
linear8bit(A)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
linear8bit(A)
torch.cuda.synchronize()
print(
f"bnb linear8bitlt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s"
)
linearMixedBit(A)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
linearMixedBit(A)
torch.cuda.synchronize()
print(
f"bnb linear8bitlt with threshold: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s"
)
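All of the timed loops above share one pattern: synchronize the device before starting the clock and again before stopping it, because CUDA kernels launch asynchronously. A condensed version of that harness (hypothetical helper, not part of the test suite):

import time
import torch

def bench_cuda(fn, iters=100, warmup=3):
    for _ in range(warmup):
        fn()                      # warmup so lazy init / caching is excluded
    torch.cuda.synchronize()      # drain queued kernels before timing
    t0 = time.time()
    for _ in range(iters):
        fn()
    torch.cuda.synchronize()      # wait for the last kernel before reading the clock
    return (time.time() - t0) / iters

# e.g. bench_cuda(lambda: torch.matmul(A, B.t())) reproduces the fp16 baseline timing above.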
===========changed ref 0===========
<s> #out3 = F.spmm_coo(cooA, Bt.t())
# #out2 = F.spmm_coo(cooA, B)
# #out2 = F.spmm_coo_very_sparse(cooA, B)
# #out1 = torch.matmul(A, Bt.t())
# torch.cuda.synchronize()
# print(time.time() - t0)
def test_coo2csr():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
csrA = F.coo2csr(cooA)
counts = csrA.rowptr[1:] - csrA.rowptr[:-1]
assert counts.numel() == A.shape[0]
+ torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))
- torch.testing.assert_allclose(counts, (A2 != 0).sum(1))
idx = A2 != 0
+ torch.testing.assert_close(A2[idx], csrA.values)
- torch.testing.assert_allclose(A2[idx], csrA.values)
|
tests.test_functional/test_extract_outliers
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<14>:<add> torch.testing.assert_close(outliers1, outliers2)
<del> torch.testing.assert_allclose(outliers1, outliers2)
<23>:<add> torch.testing.assert_close(outliers1, outliers2)
<del> torch.testing.assert_allclose(outliers1, outliers2)
|
# module: tests.test_functional
def test_extract_outliers():
<0> for i in range(k):
<1> shapeA = (4096, 4096 * 4)
<2> idx = torch.unique(torch.randint(0, shapeA[1], size=(10,)).int()).cuda()
<3> # idx = torch.Tensor([0]).int().cuda()
<4> A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
<5> outliers1 = A[:, idx.long()]
<6>
<7> CA, SA = F.transform(A, "col_turing")
<8>
<9> outliers2 = F.extract_outliers(CA, SA, idx)
<10>
<11> assert outliers2.shape[0] == shapeA[0]
<12> assert outliers2.shape[1] == idx.numel()
<13>
<14> torch.testing.assert_allclose(outliers1, outliers2)
<15>
<16> CA, SA = F.transform(A, "col_ampere")
<17>
<18> outliers2 = F.extract_outliers(CA, SA, idx)
<19>
<20> assert outliers2.shape[0] == shapeA[0]
<21> assert outliers2.shape[1] == idx.numel()
<22>
<23> torch.testing.assert_allclose(outliers1, outliers2)
<24>
|
===========changed ref 0===========
<s> #out3 = F.spmm_coo(cooA, Bt.t())
# #out2 = F.spmm_coo(cooA, B)
# #out2 = F.spmm_coo_very_sparse(cooA, B)
# #out1 = torch.matmul(A, Bt.t())
# torch.cuda.synchronize()
# print(time.time() - t0)
def test_coo2csr():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
csrA = F.coo2csr(cooA)
counts = csrA.rowptr[1:] - csrA.rowptr[:-1]
assert counts.numel() == A.shape[0]
+ torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))
- torch.testing.assert_allclose(counts, (A2 != 0).sum(1))
idx = A2 != 0
+ torch.testing.assert_close(A2[idx], csrA.values)
- torch.testing.assert_allclose(A2[idx], csrA.values)
===========changed ref 1===========
# module: tests.test_functional
def test_coo2csc():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
cscA = F.coo2csc(cooA)
counts = cscA.colptr[1:] - cscA.colptr[:-1]
assert counts.numel() == A.shape[1]
+ torch.testing.assert_close(counts.long(), (A2 != 0).sum(0))
- torch.testing.assert_allclose(counts, (A2 != 0).sum(0))
# torch uses row-major -> use transpose to transfer to col-major
idx = A2.t() != 0
+ torch.testing.assert_close(A2.t()[idx], cscA.values)
- torch.testing.assert_allclose(A2.t()[idx], cscA.values)
===========changed ref 2===========
# module: tests.test_functional
+ batch_size = 2
- batch_size = 1
+ seqdim = 2048
- seqdim = 1
values = []
values.append((batch_size, seqdim, 768, 4 * 768))
+ #values.append((batch_size, seqdim, 1024, 4*1024))
- # values.append((batch_size, seqdim, 1024, 4*1024))
+ #values.append((batch_size, seqdim, 1536, 4*1536))
- # values.append((batch_size, seqdim, 1536, 4*1536))
+ #values.append((batch_size, seqdim, 2048, 4*2048))
- # values.append((batch_size, seqdim, 2048, 4*2048))
+ #values.append((batch_size, seqdim, 2560, 4*2560))
- # values.append((batch_size, seqdim, 2560, 4*2560))
+ #values.append((batch_size, seqdim, 4096, 4*4096))
- # values.append((batch_size, seqdim, 4096, 4*4096))
+ #values.append((batch_size, seqdim, 5140, 4*5140))
- # values.append((batch_size, seqdim, 5140, 4*5140))
#values.append((batch_size, seqdim, 12288, 4*12288))
- names = [
+ names = ["batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values]
- "batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values
- ]
+ # F.prefetch_tensor(A)
+ # F.prefetch_tensor(B)
+
+
+ # F.fill(B2, 17.0)
+ # F._mul(A, B2)
+
+ # F.prefetch_tensor(A, to_cpu=True)
+ # F.prefetch_tensor(B, to_cpu=True)
+ # F.prefetch_tensor(B</s>
===========changed ref 3===========
# module: tests.test_functional
# offset: 1
<s> <add> # F.prefetch_tensor(B, to_cpu=True)
+ # F.prefetch_tensor(B2, to_cpu=True)
+ # torch.cuda.synchronize()
+
+ # assert (A==17).sum().item() == n*n
+
+ # torch.testing.assert_close(A, torch.ones(A.shape)*289)
+
===========changed ref 4===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2", values, ids=names)
def test_coo_double_quant(dim1, dim2):
threshold = 3.00
for i in range(k):
A = torch.randn(dim1, dim2, device="cuda").half()
idx = torch.abs(A) >= threshold
CA2, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)
CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(
A, threshold=threshold
)
if coo_tensor is not None:
A1 = A * idx
A2 = torch.zeros_like(A)
A2[
coo_tensor.rowidx.long(), coo_tensor.colidx.long()
] = coo_tensor.values
+ torch.testing.assert_close(A1, A2)
- torch.testing.assert_allclose(A1, A2)
A1 = A * (idx == 0)
A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()
+ torch.testing.assert_close(
- torch.testing.assert_allclose(
A * (idx == 0), A2, rtol=0.05, atol=1.5e-2
)
|
tests.test_functional/test_fp8_quant
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<4>:<del> print(e_bits, p_bits)
|
# module: tests.test_functional
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
def test_fp8_quant():
<0> for e_bits in range(1, 7):
<1> p_bits = 7-e_bits
<2> code = F.create_fp8_map(True, e_bits, p_bits).cuda()
<3>
<4> print(e_bits, p_bits)
<5> abserr = []
<6> relerr = []
<7> for i in range(100):
<8> A1 = torch.randn(1024, 1024, device="cuda")
<9> C, SC = F.quantize_blockwise(A1, code=code)
<10> A2 = F.dequantize_blockwise(C, SC)
<11> diff = torch.abs(A1 - A2)
<12> reldiff = diff/torch.abs(A1+1e-8)
<13> abserr.append(diff.mean().item())
<14> relerr.append(reldiff.mean().item())
<15> #assert diff < 0.0075
<16> #print(sum(abserr)/len(abserr))
<17> #print(sum(relerr)/len(relerr))
<18>
<19> abserr = []
<20> relerr = []
<21> for i in range(100):
<22> A1 = torch.rand(1024, 1024, device="cuda")
<23> C, SC = F.quantize_blockwise(A1, code=code)
<24> A2 = F.dequantize_blockwise(C, SC)
<25> diff = torch.abs(A1 - A2)
<26> reldiff = diff/torch.abs(A1+1e-8)
<27> abserr.append(diff.mean().item())
<28> relerr.append(reldiff.mean().item())
<29> #assert diff < 0.0075
<30> #print(sum(abserr)/len(abserr))
<31> #print(sum(relerr)/</s>
|
===========below chunk 0===========
# module: tests.test_functional
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
def test_fp8_quant():
# offset: 1
abserr = []
relerr = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
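The loops above all measure the same quantity: the mean absolute and relative error of a quantize/dequantize round trip. A compact helper that mirrors them (illustrative wrapper around the same bitsandbytes calls shown above):

import torch
import bitsandbytes.functional as F

def roundtrip_error(A: torch.Tensor, code: torch.Tensor = None):
    # quantize, dequantize, and report mean absolute / relative error
    C, state = F.quantize_blockwise(A, code=code)
    A2 = F.dequantize_blockwise(C, state)
    err = (A - A2).abs()
    return err.mean().item(), (err / (A.abs() + 1e-8)).mean().item()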
===========changed ref 0===========
# module: tests.test_functional
def test_extract_outliers():
for i in range(k):
shapeA = (4096, 4096 * 4)
idx = torch.unique(torch.randint(0, shapeA[1], size=(10,)).int()).cuda()
# idx = torch.Tensor([0]).int().cuda()
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
outliers1 = A[:, idx.long()]
CA, SA = F.transform(A, "col_turing")
outliers2 = F.extract_outliers(CA, SA, idx)
assert outliers2.shape[0] == shapeA[0]
assert outliers2.shape[1] == idx.numel()
+ torch.testing.assert_close(outliers1, outliers2)
- torch.testing.assert_allclose(outliers1, outliers2)
CA, SA = F.transform(A, "col_ampere")
outliers2 = F.extract_outliers(CA, SA, idx)
assert outliers2.shape[0] == shapeA[0]
assert outliers2.shape[1] == idx.numel()
+ torch.testing.assert_close(outliers1, outliers2)
- torch.testing.assert_allclose(outliers1, outliers2)
===========changed ref 1===========
<s> #out3 = F.spmm_coo(cooA, Bt.t())
# #out2 = F.spmm_coo(cooA, B)
# #out2 = F.spmm_coo_very_sparse(cooA, B)
# #out1 = torch.matmul(A, Bt.t())
# torch.cuda.synchronize()
# print(time.time() - t0)
def test_coo2csr():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
csrA = F.coo2csr(cooA)
counts = csrA.rowptr[1:] - csrA.rowptr[:-1]
assert counts.numel() == A.shape[0]
+ torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))
- torch.testing.assert_allclose(counts, (A2 != 0).sum(1))
idx = A2 != 0
+ torch.testing.assert_close(A2[idx], csrA.values)
- torch.testing.assert_allclose(A2[idx], csrA.values)
===========changed ref 2===========
# module: tests.test_functional
def test_coo2csc():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
cscA = F.coo2csc(cooA)
counts = cscA.colptr[1:] - cscA.colptr[:-1]
assert counts.numel() == A.shape[1]
+ torch.testing.assert_close(counts.long(), (A2 != 0).sum(0))
- torch.testing.assert_allclose(counts, (A2 != 0).sum(0))
# torch uses row-major -> use transpose to transfer to col-major
idx = A2.t() != 0
+ torch.testing.assert_close(A2.t()[idx], cscA.values)
- torch.testing.assert_allclose(A2.t()[idx], cscA.values)
===========changed ref 3===========
# module: tests.test_functional
+ batch_size = 2
- batch_size = 1
+ seqdim = 2048
- seqdim = 1
values = []
values.append((batch_size, seqdim, 768, 4 * 768))
+ #values.append((batch_size, seqdim, 1024, 4*1024))
- # values.append((batch_size, seqdim, 1024, 4*1024))
+ #values.append((batch_size, seqdim, 1536, 4*1536))
- # values.append((batch_size, seqdim, 1536, 4*1536))
+ #values.append((batch_size, seqdim, 2048, 4*2048))
- # values.append((batch_size, seqdim, 2048, 4*2048))
+ #values.append((batch_size, seqdim, 2560, 4*2560))
- # values.append((batch_size, seqdim, 2560, 4*2560))
+ #values.append((batch_size, seqdim, 4096, 4*4096))
- # values.append((batch_size, seqdim, 4096, 4*4096))
+ #values.append((batch_size, seqdim, 5140, 4*5140))
- # values.append((batch_size, seqdim, 5140, 4*5140))
#values.append((batch_size, seqdim, 12288, 4*12288))
- names = [
+ names = ["batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values]
- "batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values
- ]
+ # F.prefetch_tensor(A)
+ # F.prefetch_tensor(B)
+
+
+ # F.fill(B2, 17.0)
+ # F._mul(A, B2)
+
+ # F.prefetch_tensor(A, to_cpu=True)
+ # F.prefetch_tensor(B, to_cpu=True)
+ # F.prefetch_tensor(B</s>
===========changed ref 4===========
# module: tests.test_functional
# offset: 1
<s> <add> # F.prefetch_tensor(B, to_cpu=True)
+ # F.prefetch_tensor(B2, to_cpu=True)
+ # torch.cuda.synchronize()
+
+ # assert (A==17).sum().item() == n*n
+
+ # torch.testing.assert_close(A, torch.ones(A.shape)*289)
+
|
tests.test_functional/test_few_bit_quant
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<13>:<del> print(code)
|
# module: tests.test_functional
#assert diff < 0.0075
#print(3, sum(abserr)/len(abserr))
#print(3, sum(relerr)/len(relerr))
def test_few_bit_quant():
<0> #print('')
<1> for bits in range(2, 9):
<2> #print('='*30, bits, '='*30)
<3> for method in ['linear', 'fp8', 'dynamic', 'quantile']:
<4> abserrs = []
<5> relerrs = []
<6> code = None
<7> if method == 'linear':
<8> code = F.create_linear_map(True, total_bits=bits).cuda()
<9> elif method == 'fp8':
<10> ebits = math.ceil(bits/2)
<11> pbits = bits-ebits-1
<12> code = F.create_fp8_map(True, ebits, pbits, bits).cuda()
<13> print(code)
<14> elif method == 'dynamic':
<15> code = F.create_dynamic_map(True, bits-0, bits).cuda()
<16> elif method == 'quantile':
<17> values = torch.randn(2048, 2048, device='cuda')
<18> code = F.create_quantile_map(values, bits).cuda()
<19> # for some data types we have no zero
<20> # for some data types we have one zero
<21> # for some data types we have two zeros
<22> assert torch.unique(code).numel() in [2**bits, 2**bits-1], f'bits: {bits}, method: {method}'
<23> #print(method, (code==0).sum())
<24> assert code.numel() == 256
<25> for i in range(10):
<26>
<27> values = torch.randn(1, 32, device='cuda')
<28> values /= values.abs().max()
<29> #values[values.abs() < 1e-6] += 1e-5
<30>
<31> q1 = []
<32> v1 = []
</s>
|
===========below chunk 0===========
# module: tests.test_functional
#assert diff < 0.0075
#print(3, sum(abserr)/len(abserr))
#print(3, sum(relerr)/len(relerr))
def test_few_bit_quant():
# offset: 1
idx = torch.abs(v-code).argmin()
q1.append(idx.item())
v1.append(code[idx].item())
q1 = torch.Tensor(q1).cuda()
v1 = torch.Tensor(v1).cuda()
q2, S2 = F.quantize_blockwise(values, code=code)
v2 = F.dequantize_blockwise(q2, S2)
idx = torch.isclose(q1.int(), q2.int())
err2 = torch.abs(v2-values)
abserrs.append(err2.mean().item())
relerrs.append((err2/(1e-10+values).abs()).mean().item())
if idx.sum():
# some weird cases
err1 = torch.abs(v1-values).mean()
#assert err2.mean() <= err1
else:
torch.testing.assert_allclose(q1, q2)
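The reference path above quantizes one value at a time with an argmin over |v - code|. The same nearest-codebook-entry lookup can be written vectorized (sketch for illustration only; it is not the blockwise CUDA kernel and ignores the per-block absmax scaling):

import torch

def nearest_code(values: torch.Tensor, code: torch.Tensor):
    # index of the closest codebook entry for every value, plus its dequantized value
    idx = (values.unsqueeze(-1) - code).abs().argmin(dim=-1)
    return idx, code[idx]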
===========changed ref 0===========
# module: tests.test_functional
def test_extract_outliers():
for i in range(k):
shapeA = (4096, 4096 * 4)
idx = torch.unique(torch.randint(0, shapeA[1], size=(10,)).int()).cuda()
# idx = torch.Tensor([0]).int().cuda()
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
outliers1 = A[:, idx.long()]
CA, SA = F.transform(A, "col_turing")
outliers2 = F.extract_outliers(CA, SA, idx)
assert outliers2.shape[0] == shapeA[0]
assert outliers2.shape[1] == idx.numel()
+ torch.testing.assert_close(outliers1, outliers2)
- torch.testing.assert_allclose(outliers1, outliers2)
CA, SA = F.transform(A, "col_ampere")
outliers2 = F.extract_outliers(CA, SA, idx)
assert outliers2.shape[0] == shapeA[0]
assert outliers2.shape[1] == idx.numel()
+ torch.testing.assert_close(outliers1, outliers2)
- torch.testing.assert_allclose(outliers1, outliers2)
===========changed ref 1===========
# module: tests.test_functional
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
def test_fp8_quant():
for e_bits in range(1, 7):
p_bits = 7-e_bits
code = F.create_fp8_map(True, e_bits, p_bits).cuda()
- print(e_bits, p_bits)
abserr = []
relerr = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1, code=code)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
#assert diff < 0.0075
#print(sum(abserr)/len(abserr))
#print(sum(relerr)/len(relerr))
abserr = []
relerr = []
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1, code=code)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
#assert diff < 0.0075
#print(sum(abserr)/len(abserr))
#print(sum(relerr)/len(relerr))
abserr = []
relerr = []
for i in range(100):
A1</s>
===========changed ref 2===========
# module: tests.test_functional
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
def test_fp8_quant():
# offset: 1
<s>(relerr))
abserr = []
relerr = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
===========changed ref 3===========
<s> #out3 = F.spmm_coo(cooA, Bt.t())
# #out2 = F.spmm_coo(cooA, B)
# #out2 = F.spmm_coo_very_sparse(cooA, B)
# #out1 = torch.matmul(A, Bt.t())
# torch.cuda.synchronize()
# print(time.time() - t0)
def test_coo2csr():
threshold = 1
A = torch.randn(128, 128).half().cuda()
idx = torch.abs(A) >= threshold
nnz = (idx == 1).sum().item()
rows, cols = torch.where(idx)
values = A[idx]
cooA = F.COOSparseTensor(
A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values
)
A2 = A * idx
csrA = F.coo2csr(cooA)
counts = csrA.rowptr[1:] - csrA.rowptr[:-1]
assert counts.numel() == A.shape[0]
+ torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))
- torch.testing.assert_allclose(counts, (A2 != 0).sum(1))
idx = A2 != 0
+ torch.testing.assert_close(A2[idx], csrA.values)
- torch.testing.assert_allclose(A2[idx], csrA.values)
|
tests.test_functional/test_bench_dequantization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<11>:<add> qa, SA = F.quantize_blockwise(a)
<del> F.dequantize_blockwise(qa, SA, blocksize=2048)
|
# module: tests.test_functional
def test_bench_dequantization():
<0> a = torch.rand(1024, 1024, device='cuda').half()
<1> code =F.create_fp8_map(True, 3, 0, 4).cuda()
<2> qa, SA = F.quantize_blockwise(a, code=code)
<3> print(qa.max())
<4>
<5> max_theoretical_mu = 1024*1024*2/1024**3/672*1000*1000
<6> #print(max_theoretical_mu)
<7>
<8> torch.cuda.synchronize()
<9> t0 = time.time()
<10> for i in range(100):
<11> F.dequantize_blockwise(qa, SA, blocksize=2048)
<12> torch.cuda.synchronize()
<13>
|
===========changed ref 0===========
# module: tests.test_functional
def test_extract_outliers():
for i in range(k):
shapeA = (4096, 4096 * 4)
idx = torch.unique(torch.randint(0, shapeA[1], size=(10,)).int()).cuda()
# idx = torch.Tensor([0]).int().cuda()
A = torch.randint(-128, 127, size=shapeA, device="cuda").to(torch.int8)
outliers1 = A[:, idx.long()]
CA, SA = F.transform(A, "col_turing")
outliers2 = F.extract_outliers(CA, SA, idx)
assert outliers2.shape[0] == shapeA[0]
assert outliers2.shape[1] == idx.numel()
+ torch.testing.assert_close(outliers1, outliers2)
- torch.testing.assert_allclose(outliers1, outliers2)
CA, SA = F.transform(A, "col_ampere")
outliers2 = F.extract_outliers(CA, SA, idx)
assert outliers2.shape[0] == shapeA[0]
assert outliers2.shape[1] == idx.numel()
+ torch.testing.assert_close(outliers1, outliers2)
- torch.testing.assert_allclose(outliers1, outliers2)
===========changed ref 1===========
# module: tests.test_functional
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
def test_fp8_quant():
for e_bits in range(1, 7):
p_bits = 7-e_bits
code = F.create_fp8_map(True, e_bits, p_bits).cuda()
- print(e_bits, p_bits)
abserr = []
relerr = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1, code=code)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
#assert diff < 0.0075
#print(sum(abserr)/len(abserr))
#print(sum(relerr)/len(relerr))
abserr = []
relerr = []
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1, code=code)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
#assert diff < 0.0075
#print(sum(abserr)/len(abserr))
#print(sum(relerr)/len(relerr))
abserr = []
relerr = []
for i in range(100):
A1</s>
===========changed ref 2===========
# module: tests.test_functional
# print(sum(diffs)/len(diffs))
# print(sum(reldiffs)/len(reldiffs))
def test_fp8_quant():
# offset: 1
<s>(relerr))
abserr = []
relerr = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, SC = F.quantize_blockwise(A1)
A2 = F.dequantize_blockwise(C, SC)
diff = torch.abs(A1 - A2)
reldiff = diff/torch.abs(A1+1e-8)
abserr.append(diff.mean().item())
relerr.append(reldiff.mean().item())
===========changed ref 3===========
# module: tests.test_functional
#assert diff < 0.0075
#print(3, sum(abserr)/len(abserr))
#print(3, sum(relerr)/len(relerr))
def test_few_bit_quant():
#print('')
for bits in range(2, 9):
#print('='*30, bits, '='*30)
for method in ['linear', 'fp8', 'dynamic', 'quantile']:
abserrs = []
relerrs = []
code = None
if method == 'linear':
code = F.create_linear_map(True, total_bits=bits).cuda()
elif method == 'fp8':
ebits = math.ceil(bits/2)
pbits = bits-ebits-1
code = F.create_fp8_map(True, ebits, pbits, bits).cuda()
- print(code)
elif method == 'dynamic':
code = F.create_dynamic_map(True, bits-0, bits).cuda()
elif method == 'quantile':
values = torch.randn(2048, 2048, device='cuda')
code = F.create_quantile_map(values, bits).cuda()
# for some data types we have no zero
# for some data types we have one zero
# for some data types we have two zeros
assert torch.unique(code).numel() in [2**bits, 2**bits-1], f'bits: {bits}, method: {method}'
#print(method, (code==0).sum())
assert code.numel() == 256
for i in range(10):
values = torch.randn(1, 32, device='cuda')
values /= values.abs().max()
#values[values.abs() < 1e-6] += 1e-5
q1 = []
v1 = []
for v in values[0]:
idx = torch.abs(v-code).argmin()
q1.append(idx.</s>
===========changed ref 4===========
# module: tests.test_functional
#assert diff < 0.0075
#print(3, sum(abserr)/len(abserr))
#print(3, sum(relerr)/len(relerr))
def test_few_bit_quant():
# offset: 1
<s> v in values[0]:
idx = torch.abs(v-code).argmin()
q1.append(idx.item())
v1.append(code[idx].item())
q1 = torch.Tensor(q1).cuda()
v1 = torch.Tensor(v1).cuda()
q2, S2 = F.quantize_blockwise(values, code=code)
v2 = F.dequantize_blockwise(q2, S2)
idx = torch.isclose(q1.int(), q2.int())
err2 = torch.abs(v2-values)
abserrs.append(err2.mean().item())
relerrs.append((err2/(1e-10+values).abs()).mean().item())
if idx.sum():
# some weird cases
err1 = torch.abs(v1-values).mean()
#assert err2.mean() <= err1
else:
+ torch.testing.assert_close(q1, q2)
- torch.testing.assert_allclose(q1, q2)
|
tests.test_optim/assert_most_approx_close
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<4>:<add> torch.testing.assert_close(a, b, rtol, atol)
<del> torch.testing.assert_allclose(a, b, rtol, atol)
|
# module: tests.test_optim
def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):
<0> idx = torch.isclose(a, b, rtol, atol)
<1> error_count = (idx == 0).sum().item()
<2> if error_count > max_error_count:
<3> print(f"Too many values not close: assert {error_count} < {max_error_count}")
<4> torch.testing.assert_allclose(a, b, rtol, atol)
<5>
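In other words, the helper tolerates a bounded number of elementwise mismatches before falling back to the strict check. For example (illustrative values):

a = torch.zeros(100)
b = a.clone(); b[:3] = 1.0
assert_most_approx_close(a, b, max_error_count=5)   # passes: only 3 values differ
# with max_error_count=2 the same call would print the count and fail the strict comparison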
|
===========changed ref 0===========
# module: tests.test_optim
# import apex
k = 20
+ str2bf16support = {}
+ str2bf16support['adam8bit_blockwise'] = True
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 7===========
# module: tests.test_functional
- #print((time.time()-t0)/1e6)
-
===========changed ref 8===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 12===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 13===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 14===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 15===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 16===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 17===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 18===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 19===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 20===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 21===========
# module: bitsandbytes.optim.adam
+ class PagedAdam(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
|
tests.test_optim/test_optimizer32bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<11>:<add> elif gtype == torch.bfloat16:
<add> atol, rtol = 1e-3, 1e-2
<24>:<add> torch.testing.assert_close(
<del> torch.testing.assert_allclose(
<26>:<add> bnb_optimizer.state[p2][name2].cuda(),
<del> bnb_optimizer.state[p2][name2],
|
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
<0> if dim1 == 1 and dim2 == 1:
<1> return
<2> p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
<3> p2 = p1.clone()
<4> p1 = p1.float()
<5>
<6> torch_optimizer = str2optimizers[optim_name][0]([p1])
<7> bnb_optimizer = str2optimizers[optim_name][1]([p2])
<8>
<9> if gtype == torch.float32:
<10> atol, rtol = 1e-6, 1e-5
<11> else:
<12> atol, rtol = 1e-4, 1e-3
<13>
<14> for i in range(k):
<15> g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
<16> p1.grad = g.clone().float()
<17> p2.grad = g.clone()
<18>
<19> bnb_optimizer.step()
<20> torch_optimizer.step()
<21>
<22>
<23> for name1, name2 in str2statenames[optim_name]:
<24> torch.testing.assert_allclose(
<25> torch_optimizer.state[p1][name1],
<26> bnb_optimizer.state[p2][name2],
<27> atol=atol,
<28> rtol=rtol,
<29> )
<30>
<31> # since Lion can have pretty noisy updates where things lie at the boundary
<32> # allow up to 10 errors for Lion
<33> assert_most_approx_close(p1, p2.float(), atol, rtol, max_error_count=10)
<34>
<35> if i % (k // 5) == 0 and i > 0:
<36> path = get_temp_dir()
<37> torch.save</s>
|
===========below chunk 0===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
# offset: 1
del bnb_optimizer
bnb_optimizer = None
bnb_optimizer = str2optimizers[optim_name][1]([p2])
bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
rm_path(path)
# since Lion can have pretty noisy updates where things lie at the boundary
# allow up to 10 errors for Lion
assert_most_approx_close(p1, p2.float(), atol, rtol, max_error_count=10)
for name1, name2 in str2statenames[optim_name]:
# since Lion can have pretty noisy updates where things lie at the boundary
# allow up to 10 errors for Lion
assert_most_approx_close(torch_optimizer.state[p1][name1], bnb_optimizer.state[p2][name2],
atol=atol, rtol=rtol,
max_error_count=10)
if gtype == torch.float16:
# the adam buffers should also be close because they are 32-bit
            # but the parameters can diverge because they are 16-bit
            # the difference grows larger and larger with each update
# --> copy the state to keep weights close
p1.data = p1.data.half().float()
p2.copy_(p1.data)
torch.testing.assert_allclose(p1.half(), p2)
if optim_name in ["lars", "lamb"]:
assert bnb_optimizer.state[p2]["unorm_vec"] > 0.0
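The checkpoint portion of this test follows a standard round trip: serialize the optimizer, rebuild it, reload, and verify that the state tensors survive unchanged. A condensed sketch of that pattern (hypothetical helper, assuming a CUDA parameter; not the test code itself):

import os, tempfile
import torch
import bitsandbytes as bnb

def optimizer_state_roundtrip(p: torch.Tensor):
    opt = bnb.optim.Adam([p], lr=1e-3)
    p.grad = torch.randn_like(p)
    opt.step()                                   # populate state1/state2
    saved = {k: v.clone() for k, v in opt.state[p].items() if torch.is_tensor(v)}
    path = os.path.join(tempfile.mkdtemp(), "opt.pt")
    torch.save(opt.state_dict(), path)
    opt2 = bnb.optim.Adam([p], lr=1e-3)
    opt2.load_state_dict(torch.load(path))
    for k, v in saved.items():
        torch.testing.assert_close(v, opt2.state[p][k])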
===========changed ref 0===========
# module: tests.test_optim
# import apex
k = 20
+ str2bf16support = {}
+ str2bf16support['adam8bit_blockwise'] = True
+
===========changed ref 1===========
# module: tests.test_optim
def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):
idx = torch.isclose(a, b, rtol, atol)
error_count = (idx == 0).sum().item()
if error_count > max_error_count:
print(f"Too many values not close: assert {error_count} < {max_error_count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 8===========
# module: tests.test_functional
- #print((time.time()-t0)/1e6)
-
===========changed ref 9===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 12===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 13===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 14===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 15===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 16===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 17===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 18===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
|
tests.test_optim/test_optimizer8bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<add> if gtype == torch.bfloat16 and optim_name not in str2bf16support: return
<13>:<add> elif gtype == torch.bfloat16:
<add> atol, rtol = 3e-3, 1e-3
<add> patol, prtol = 1e-4, 1e-2
<del>
<21>:<add> for i in range(100):
<del> for i in range(50):
|
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
<0> if dim1 == 1 and dim2 == 1:
<1> return
<2> p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
<3> p2 = p1.clone()
<4> p1 = p1.float()
<5> blocksize = 2048
<6>
<7> torch_optimizer = str2optimizers[optim_name][0]([p1])
<8> bnb_optimizer = str2optimizers[optim_name][1]([p2])
<9>
<10> if gtype == torch.float32:
<11> atol, rtol = 3e-3, 1e-3
<12> patol, prtol = 1e-5, 1e-3
<13>
<14> else:
<15> atol, rtol = 3e-3, 1e-3
<16> patol, prtol = 1e-5, 1e-3
<17>
<18> errors = []
<19> relerrors = []
<20>
<21> for i in range(50):
<22> g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
<23> p1.grad = g.clone().float()
<24> p2.grad = g.clone()
<25>
<26> bnb_optimizer.step()
<27> torch_optimizer.step()
<28>
<29> # since Lion can have pretty noisy updates where things lie at the boundary
<30> # allow up to 5 errors for Lion
<31> assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=5)
<32>
<33> dequant_states = []
<34> for name1, name2, qmap, max_val in str2statenames[optim_name]:
<35> # print(bnb_optimizer.state[p2][max_val], name1)
<36> if "</s>
|
===========below chunk 0===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
# offset: 1
s1 = F.dequantize_blockwise(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
blocksize=blocksize,
)
else:
s1 = F.dequantize(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
)
num_not_close = (
torch.isclose(
torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol
)
== 0
)
assert num_not_close.sum().item() < 20
dequant_states.append(s1.clone())
err = torch.abs(p1 - p2)
relerr = err / (torch.abs(p1)+1e-9)
assert err.mean() < 0.0001
assert relerr.mean() < 0.001
errors.append(err.mean().item())
relerrors.append(relerr.mean().item())
if i % 10 == 0 and i > 0:
for (name1, name2, qmap, max_val), s in zip(
str2statenames[optim_name], dequant_states
):
s1cpy = s.clone()
raws1cpy = bnb_optimizer.state[p2][name2].clone()
qmap1 = bnb_optimizer.state[p2][qmap].clone()
path = get_temp_dir()
torch.save(bnb_optimizer.</s>
===========below chunk 1===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
# offset: 2
<s>p2][qmap].clone()
path = get_temp_dir()
torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
del bnb_optimizer
bnb_optimizer = None
bnb_optimizer = str2optimizers[optim_name][1]([p2])
bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
rm_path(path)
torch.testing.assert_allclose(
raws1cpy, bnb_optimizer.state[p2][name2]
)
torch.testing.assert_allclose(
qmap1, bnb_optimizer.state[p2][qmap]
)
if "blockwise" in optim_name:
s1 = F.dequantize_blockwise(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
blocksize=blocksize,
)
else:
s1 = F.dequantize(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
)
torch.testing.assert_allclose(s1cpy, s1)
num_not_close = (
torch.isclose(
torch_optimizer.state[p1][name1],
s1,
atol=atol,
</s>
===========below chunk 2===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
# offset: 3
<s>=rtol,
)
== 0
)
assert num_not_close.sum().item() < 20
# since Lion can have pretty noisy updates where things lie at the boundary
# allow up to 5 errors for Lion
assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=5)
# the parameters diverge quickly. Here we keep them close
# together so we can test against the Adam error
p1.data = p1.data.to(gtype).float()
p2.copy_(p1.data)
torch.testing.assert_allclose(p1.to(gtype), p2)
for (name1, name2, qmap, max_val), s in zip(
str2statenames[optim_name], dequant_states
):
torch_optimizer.state[p1][name1].copy_(s.data)
===========changed ref 0===========
# module: tests.test_optim
dim1 = [1024]
dim2 = [32, 1024, 4097]
+ gtype = [torch.float32, torch.float16, torch.bfloat16]
- gtype = [torch.float32, torch.float16]
optimizer_names = [
"adam8bit",
"lion8bit",
"momentum8bit",
"rmsprop8bit",
"adam8bit_blockwise",
"lion8bit_blockwise",
- "lars8bit",
"momentum8bit_blockwise",
"rmsprop8bit_blockwise",
]
values = list(product(dim1, dim2, gtype, optimizer_names))
names = [
"dim1_{}_dim2_{}_gtype_{}_optim_{}".format(*vals) for vals in values
]
===========changed ref 1===========
# module: tests.test_optim
# import apex
k = 20
+ str2bf16support = {}
+ str2bf16support['adam8bit_blockwise'] = True
+
===========changed ref 2===========
# module: tests.test_optim
def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):
idx = torch.isclose(a, b, rtol, atol)
error_count = (idx == 0).sum().item()
if error_count > max_error_count:
print(f"Too many values not close: assert {error_count} < {max_error_count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
|
tests.test_optim/test_adam_percentile_clipping
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_bits", values, ids=names)
def test_adam_percentile_clipping(dim1, dim2, gtype, optim_bits):
<0> if dim1 == 1 and dim2 == 1:
<1> return
<2> p1 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
<3> beta1 = 0.9
<4> beta2 = 0.999
<5> lr = 0.001
<6> eps = 1e-8
<7> p1 = p1.cuda()
<8> p2 = p1.clone()
<9> adam1 = bnb.optim.Adam([p1], lr, (beta1, beta2), eps, optim_bits=optim_bits)
<10> adam2 = bnb.optim.Adam(
<11> [p2],
<12> lr,
<13> (beta1, beta2),
<14> eps,
<15> optim_bits=optim_bits,
<16> percentile_clipping=5,
<17> )
<18>
<19> gnorm_vec = torch.zeros(100).cuda()
<20> step = 0
<21>
<22> for i in range(50):
<23> step += 1
<24> g1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + (
<25> 0.01 * i
<26> )
<27> g2 = g1.clone()
<28> p2.grad = g2
<29>
<30> current_gnorm, clip_val, gnorm_scale = F.percentile_clipping(
<31> g1, gnorm_vec, step, 5
<32> )
<33> g1 = (g1.float() * gnorm_scale).to(gtype)
<34> p1.grad = g1
<35>
<36> adam1.step()
<37> adam2.step()
<38>
<39> # gnorm_scale is not deterministic (warp reductions), as such there can be slight differences in state
<40> if</s>
|
===========below chunk 0===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_bits", values, ids=names)
def test_adam_percentile_clipping(dim1, dim2, gtype, optim_bits):
# offset: 1
torch.testing.assert_allclose(p1, p2)
torch.testing.assert_allclose(
adam1.state[p1]["state1"],
adam2.state[p2]["state1"],
atol=5e-5,
rtol=1e-4,
)
torch.testing.assert_allclose(
adam1.state[p1]["state2"],
adam2.state[p2]["state2"],
atol=5e-5,
rtol=1e-4,
)
elif optim_bits == 8:
torch.testing.assert_allclose(p1, p2, atol=1e-4, rtol=1e-3)
torch.testing.assert_allclose(
adam1.state[p1]["state1"],
adam2.state[p2]["state1"],
atol=2,
rtol=1e-3,
)
torch.testing.assert_allclose(
adam1.state[p1]["state2"],
adam2.state[p2]["state2"],
atol=2,
rtol=1e-3,
)
adam1.state[p1]["state1"].copy_(adam2.state[p2]["state1"])
adam1.state[p1]["state2"].copy_(adam2.state[p2]["state2"])
if i % 10 == 0 and i > 0:
path = get_temp_dir()
torch.save(adam2.state_dict(), join(path, "opt.pt"))
del adam2
adam2 =</s>
===========below chunk 1===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_bits", values, ids=names)
def test_adam_percentile_clipping(dim1, dim2, gtype, optim_bits):
# offset: 2
<s>adam2.state_dict(), join(path, "opt.pt"))
del adam2
adam2 = None
adam2 = bnb.optim.Adam(
[p2],
lr,
(beta1, beta2),
eps,
optim_bits=optim_bits,
percentile_clipping=5,
)
adam2.load_state_dict(torch.load(join(path, "opt.pt")))
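Percentile clipping, as exercised above, rescales a gradient when its norm exceeds a chosen percentile of the recent gradient-norm history, so the clip threshold adapts instead of being fixed. A rough, assumption-labelled sketch of that idea (the real F.percentile_clipping keeps the history in the fixed 100-slot gnorm_vec shown above and runs on the GPU):

import torch

def percentile_clip(grad: torch.Tensor, history: list, percentile: int = 5, window: int = 100):
    # Sketch only: clip against the p-th percentile of the recent gradient-norm history.
    gnorm = grad.float().norm().item()
    history.append(gnorm)
    recent = sorted(history[-window:])
    clip = recent[min(len(recent) - 1, int(len(recent) * percentile / 100))]
    scale = 1.0 if gnorm <= clip else clip / gnorm
    return gnorm, clip, scale  # mirrors (current_gnorm, clip_val, gnorm_scale) above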
===========changed ref 0===========
# module: tests.test_optim
dim1 = [1024]
dim2 = [32, 1024, 4097]
+ gtype = [torch.float32, torch.float16, torch.bfloat16]
- gtype = [torch.float32, torch.float16]
optimizer_names = [
"adam8bit",
"lion8bit",
"momentum8bit",
"rmsprop8bit",
"adam8bit_blockwise",
"lion8bit_blockwise",
- "lars8bit",
"momentum8bit_blockwise",
"rmsprop8bit_blockwise",
]
values = list(product(dim1, dim2, gtype, optimizer_names))
names = [
"dim1_{}_dim2_{}_gtype_{}_optim_{}".format(*vals) for vals in values
]
===========changed ref 1===========
# module: tests.test_optim
# import apex
k = 20
+ str2bf16support = {}
+ str2bf16support['adam8bit_blockwise'] = True
+
===========changed ref 2===========
# module: tests.test_optim
def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):
idx = torch.isclose(a, b, rtol, atol)
error_count = (idx == 0).sum().item()
if error_count > max_error_count:
print(f"Too many values not close: assert {error_count} < {max_error_count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 7===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 9===========
# module: tests.test_functional
- #print((time.time()-t0)/1e6)
-
===========changed ref 10===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 12===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 13===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 14===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 15===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
|
|
tests.test_modules/assert_all_approx_close
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<4>:<add> torch.testing.assert_close(a, b, rtol, atol)
<del> torch.testing.assert_allclose(a, b, rtol, atol)
|
# module: tests.test_modules
def assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):
<0> idx = torch.isclose(a, b, rtol, atol)
<1> sumval = (idx == 0).sum().item()
<2> if sumval > count:
<3> print(f"Too many values not close: assert {sumval} < {count}")
<4> torch.testing.assert_allclose(a, b, rtol, atol)
<5>
|
===========changed ref 0===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 6===========
# module: tests.test_functional
- #print((time.time()-t0)/1e6)
-
===========changed ref 7===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 8===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 11===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 12===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 13===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 14===========
# module: tests.test_optim
# import apex
k = 20
+ str2bf16support = {}
+ str2bf16support['adam8bit_blockwise'] = True
+
===========changed ref 15===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
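The GlobalPageManager refs above implement a lazy singleton: direct construction raises, and get_instance() builds the object once via __new__ plus initialize(). A standalone sketch of the same pattern (illustration only, not the bitsandbytes class itself):

    class PageManagerSketch:
        _instance = None

        def __init__(self):
            # direct construction is forbidden so only one instance can ever exist
            raise RuntimeError("Call get_instance() instead")

        def initialize(self):
            self.paged_tensors = []

        @classmethod
        def get_instance(cls):
            if cls._instance is None:
                cls._instance = cls.__new__(cls)   # bypass __init__
                cls._instance.initialize()
            return cls._instance

    pm = PageManagerSketch.get_instance()
    assert pm is PageManagerSketch.get_instance()  # always the same object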
===========changed ref 16===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 17===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 18===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 19===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 20===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 21===========
# module: bitsandbytes.optim.adam
+ class PagedAdam(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
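A rough usage sketch for the paged optimizer classes above (model and hyperparameters are placeholders; it assumes the Paged* classes are exported from bitsandbytes.optim like their non-paged counterparts, and each constructor simply forwards is_paged=True to the base optimizer):

    import torch
    import bitsandbytes as bnb

    model = torch.nn.Linear(4096, 4096).cuda()
    opt = bnb.optim.PagedAdam8bit(model.parameters(), lr=1e-3, betas=(0.9, 0.999))

    x = torch.randn(4, 4096, device='cuda')
    loss = model(x).mean()
    loss.backward()
    opt.step()            # optimizer state tensors are allocated as paged buffers
    opt.zero_grad()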
|
tests.test_modules/test_linear8bitlt_accumulated_gradient
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<del> l1 = torch.nn.Sequential(
<1>:<add> l1 = torch.nn.Sequential(*[bnb.nn.Linear8bitLt(32, 32).cuda().half() for i in range(2)])
<del> *[bnb.nn.Linear8bitLt(32, 32).cuda().half() for i in range(2)]
<2>:<del> )
<3>:<del> l2 = torch.nn.Sequential(
<4>:<add> l2 = torch.nn.Sequential(*[torch.nn.Linear(32, 32).cuda().half() for i in range(2)])
<del> *[torch.nn.Linear(32, 32).cuda().half() for i in range(2)]
<5>:<del> )
<27>:<add> print(i)
|
# module: tests.test_modules
def test_linear8bitlt_accumulated_gradient():
<0> l1 = torch.nn.Sequential(
<1> *[bnb.nn.Linear8bitLt(32, 32).cuda().half() for i in range(2)]
<2> )
<3> l2 = torch.nn.Sequential(
<4> *[torch.nn.Linear(32, 32).cuda().half() for i in range(2)]
<5> )
<6> l2[0].weight = torch.nn.Parameter(l1[0].weight.clone())
<7> l2[0].bias = torch.nn.Parameter(l1[0].bias.clone())
<8> l2[1].weight = torch.nn.Parameter(l1[1].weight.clone())
<9> l2[1].bias = torch.nn.Parameter(l1[1].bias.clone())
<10> opt1 = bnb.optim.Adam8bit(l1.parameters(), lr=0.001)
<11> opt2 = bnb.optim.Adam8bit(l2.parameters(), lr=0.001)
<12>
<13> acc_steps = 10
<14>
<15> for i in range(10):
<16> b1 = torch.randn(16, 8, 32, device="cuda").half()
<17> o1 = l1(b1)
<18> o2 = l2(b1)
<19> loss1 = o1.mean()
<20> loss2 = o2.mean()
<21> loss1.backward()
<22> loss2.backward()
<23> if i == 2:
<24> assert l1[0].state.CxB is not None
<25> assert l1[1].state.CxB is not None
<26>
<27> if i > 0 and i % acc_steps == 0:
<28> opt1.step()
<29> opt1.zero_grad(True)
<30> opt2.step()
<31> opt2.zero_grad(True)
<32> assert_all_approx_close(
<33> l1[0].weight, l2[0].weight, rtol=1.05, atol=</s>
|
===========below chunk 0===========
# module: tests.test_modules
def test_linear8bitlt_accumulated_gradient():
# offset: 1
)
assert_all_approx_close(
l1[1].weight, l2[1].weight, rtol=1.05, atol=0.01, count=2
)
# we do this copy because otherwise we have small divergences over time that add up
l1[0].weight.data.copy_(l2[0].weight.data)
l1[1].weight.data.copy_(l2[1].weight.data)
else:
torch.testing.assert_allclose(l1[0].weight.grad, l2[0].weight.grad)
torch.testing.assert_allclose(l1[1].weight.grad, l2[1].weight.grad)
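The test above runs an int8 stack and an fp16 stack side by side under gradient accumulation; stripped of the comparison logic, the bare pattern it exercises is just:

    import torch

    model = torch.nn.Linear(32, 32).cuda()
    opt = torch.optim.Adam(model.parameters(), lr=0.001)
    acc_steps = 10

    for i in range(100):
        x = torch.randn(16, 32, device='cuda')
        loss = model(x).mean()
        loss.backward()                  # gradients keep accumulating in param.grad
        if i > 0 and i % acc_steps == 0:
            opt.step()                   # one update for the whole accumulation window
            opt.zero_grad(True)          # set_to_none=True, as in the test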
===========changed ref 0===========
# module: tests.test_modules
def assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
===========changed ref 1===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ _instance = None
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def initialize(self):
+ self.paged_tensors = []
+
===========changed ref 3===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ def __init__(self):
+ raise RuntimeError("Call get_instance() instead")
+
===========changed ref 4===========
# module: bitsandbytes.functional
+ def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value)
+ elementwise_func('fill', A, None, value)
+
===========changed ref 5===========
# module: bitsandbytes.functional
+ def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0)
+ elementwise_func('_mul', A, B, 0)
+
===========changed ref 6===========
# module: bitsandbytes.functional
+ def arange(A, device=None): elementwise_func('arange', A, None, 0)
+ elementwise_func('arange', A, None, 0)
+
===========changed ref 7===========
# module: tests.test_functional
- #print((time.time()-t0)/1e6)
-
===========changed ref 8===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
+
===========changed ref 9===========
# module: bitsandbytes.functional
+ def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4')
+
===========changed ref 10===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
+
===========changed ref 11===========
# module: bitsandbytes.functional
+ def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
+ return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4')
+
===========changed ref 12===========
# module: bitsandbytes.nn.modules
+ class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
+
===========changed ref 13===========
# module: bitsandbytes.nn.modules
+ class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
+
===========changed ref 14===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
+ assert quant_state is not None
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+
===========changed ref 15===========
# module: tests.test_optim
# import apex
k = 20
+ str2bf16support = {}
+ str2bf16support['adam8bit_blockwise'] = True
+
===========changed ref 16===========
# module: bitsandbytes.functional
+ class GlobalPageManager:
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls.__new__(cls)
+ cls._instance.initialize()
+ return cls._instance
+
===========changed ref 17===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def initialize(self):
if not getattr(self, 'initialized', False):
self.has_printed = False
self.lib = None
self.initialized = False
+ self.error = False
===========changed ref 18===========
# module: bitsandbytes.optim.adam
+ class PagedAdam32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 19===========
# module: bitsandbytes.optim.adam
+ class PagedAdam8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 20===========
# module: bitsandbytes.optim.adamw
+ class PagedAdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32,
+ args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
|
tests.test_modules/test_linear8bitlt_no_fp16_weights
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
675baa79d2434b8bb779859ad08a73b9c0c82fbe
|
Merge remote-tracking branch 'origin/main' into merge
|
<0>:<del> l1 = (
<1>:<del> bnb.nn.Linear8bitLt(
<2>:<add> l1 = (bnb.nn.Linear8bitLt( 32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward).cuda().half())
<del> 32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward
<3>:<del> )
<4>:<del> .cuda()
<5>:<del> .half()
<6>:<del> )
|
# module: tests.test_modules
+ @pytest.mark.parametrize("threshold", [0.0, 2.0])
- @pytest.mark.parametrize("threshold", values, ids=names)
@pytest.mark.parametrize("memory_efficient_backward", [False])
def test_linear8bitlt_no_fp16_weights(threshold, memory_efficient_backward):
<0> l1 = (
<1> bnb.nn.Linear8bitLt(
<2> 32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward
<3> )
<4> .cuda()
<5> .half()
<6> )
<7> assert l1.weight.dtype == torch.int8
<8>
<9> l1.eval()
<10> for i in range(100):
<11> b1 = torch.randn(16, 8, 32, device="cuda").half()
<12> o1 = l1(b1)
<13> assert o1.dtype == torch.float16
<14>
<15> mlp = MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False).cuda()
<16> assert mlp.fc1.weight.dtype == torch.int8
<17> assert mlp.fc2.weight.dtype == torch.int8
<18>
<19> for i in range(100):
<20> b1 = torch.randn(16, 8, 32, device="cuda").half()
<21> o1 = mlp(b1)
<22> assert o1.dtype == torch.float16
<23> if threshold > 0:
<24> assert mlp.fc1.state.idx is not None
<25> if threshold > 0:
<26> assert mlp.fc2.state.idx is not None
<27>
<28> mlp = (
<29> MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False)
<30> .cuda()
<31> .half()
<32> )
<33> assert mlp.fc1.weight.dtype == torch.int8
<34> assert mlp.fc2.weight.dtype</s>
|
===========below chunk 0===========
# module: tests.test_modules
+ @pytest.mark.parametrize("threshold", [0.0, 2.0])
- @pytest.mark.parametrize("threshold", values, ids=names)
@pytest.mark.parametrize("memory_efficient_backward", [False])
def test_linear8bitlt_no_fp16_weights(threshold, memory_efficient_backward):
# offset: 1
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
mlp = (
MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False)
.half()
.cuda()
)
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
mlp = (
MLP8bit(
32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward
)
.half()
.to("cuda")
)
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.</s>
===========below chunk 1===========
# module: tests.test_modules
+ @pytest.mark.parametrize("threshold", [0.0, 2.0])
- @pytest.mark.parametrize("threshold", values, ids=names)
@pytest.mark.parametrize("memory_efficient_backward", [False])
def test_linear8bitlt_no_fp16_weights(threshold, memory_efficient_backward):
# offset: 2
<s>lp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
assert mlp.fc1.weight.device.type == "cuda"
assert mlp.fc2.weight.device.type == "cuda"
mlp = MLP8bit(
32, 64, threshold=threshold, has_fp16_weights=False, memory_efficient_backward=memory_efficient_backward
)
w1, w2 = mlp.fc1.weight.clone().cuda(), mlp.fc2.weight.clone().cuda() # grab weights before quantization,
mlp = mlp.cuda().half() # and this line triggers quantization
for i in range(100):
b1 = torch.randn(16, 8, 32, device="cuda").half()
o1 = mlp(b1)
assert o1.dtype == torch.float16
if threshold > 0:
assert mlp.fc1.state.idx is not None
if threshold > 0:
assert mlp.fc2.state.idx is not None
assert mlp.fc1.weight.dtype == torch.int8
assert mlp.fc2.weight.dtype == torch.int8
</s>
===========below chunk 2===========
# module: tests.test_modules
+ @pytest.mark.parametrize("threshold", [0.0, 2.0])
- @pytest.mark.parametrize("threshold", values, ids=names)
@pytest.mark.parametrize("memory_efficient_backward", [False])
def test_linear8bitlt_no_fp16_weights(threshold, memory_efficient_backward):
# offset: 3
<s>lp.fc1.weight.device.type == "cuda"
assert mlp.fc2.weight.device.type == "cuda"
if memory_efficient_backward:
b1 = torch.randn(16, 8, 32, device="cuda", requires_grad=True, dtype=torch.half)
o1 = mlp(b1)
assert o1.dtype == torch.float16
assert o1.requires_grad
grad_proj = torch.randn_like(o1)
mlp.zero_grad()
(o1 * grad_proj).sum().backward()
grad_ref = grad_proj.flatten(2) @ w2.half() @ w1.half()
scale = grad_ref.abs().mean()
torch.testing.assert_allclose(b1.grad, grad_ref, rtol=0, atol=0.05 * scale)
idx = torch.isclose(b1.grad, grad_ref, atol=0.01 * scale, rtol=0.1)
assert (idx == 0).sum().item() <= b1.numel() * 0.005
===========changed ref 0===========
# module: tests.test_modules
+ modules = []
+ modules.append(bnb.nn.Linear8bitLt)
+ modules.append(bnb.nn.Linear4bit)
+ modules.append(bnb.nn.LinearFP4)
+ modules.append(bnb.nn.LinearNF4)
+ modules.append(lambda d1, d2: bnb.nn.LinearFP4(d1, d2, compress_statistics=True))
+ modules.append(lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compress_statistics=True))
+ names = ['Int8Lt', '4bit', 'FP4', 'NF4', 'FP4+C', 'NF4+C']
- threshold = [0.0, 2.0]
- values = threshold
- names = [f"threshold_{vals}" for vals in values]
===========changed ref 1===========
# module: tests.test_modules
def assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):
idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol, atol)
- torch.testing.assert_allclose(a, b, rtol, atol)
|
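For reference, the inference-only configuration this test keeps asserting (int8 weights after the move to CUDA, fp16 activations, optional outlier threshold), as a minimal hedged sketch:

    import torch
    import bitsandbytes as bnb

    layer = bnb.nn.Linear8bitLt(32, 64, bias=False, has_fp16_weights=False, threshold=6.0)
    layer = layer.cuda().half()                 # the move to CUDA quantizes the weight
    assert layer.weight.dtype == torch.int8

    x = torch.randn(16, 8, 32, dtype=torch.float16, device='cuda')
    y = layer(x)
    assert y.dtype == torch.float16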
tests.test_modules/test_linear8bitlt_accumulated_gradient
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4bd11518293ea30c6792a5baf64f0715739a09ca
|
Fixed gradient accumulation test.
|
<2>:<add> l1[0].weight.data.copy_(l2[0].weight.data)
<add> l1[1].weight.data.copy_(l2[1].weight.data)
<add> l1[0].bias.data.copy_(l2[0].bias.data)
<add> l1[1].bias.data.copy_(l2[1].bias.data)
<add>
<del> l2[0].weight = torch.nn.Parameter(l1[0].weight.clone())
<3>:<del> l2[0].bias = torch.nn.Parameter(l1[0].bias.clone())
<4>:<del> l2[1].weight = torch.nn.Parameter(l1[1].weight.clone())
<5>:<del> l2[1].bias = torch.nn.Parameter(l1[1].bias.clone())
<6>:<add> opt1 = bnb.optim.Adam32bit(l1.parameters(), lr=0.001)
<del> opt1 = bnb.optim.Adam8bit(l1.parameters(), lr=0.001)
<7>:<add> opt2 = bnb.optim.Adam32bit(l2.parameters(), lr=0.001)
<del> opt2 = bnb.optim.Adam8bit(l2.parameters(), lr=0.001)
<23>:<del> print(i)
|
# module: tests.test_modules
def test_linear8bitlt_accumulated_gradient():
<0> l1 = torch.nn.Sequential(*[bnb.nn.Linear8bitLt(32, 32).cuda().half() for i in range(2)])
<1> l2 = torch.nn.Sequential(*[torch.nn.Linear(32, 32).cuda().half() for i in range(2)])
<2> l2[0].weight = torch.nn.Parameter(l1[0].weight.clone())
<3> l2[0].bias = torch.nn.Parameter(l1[0].bias.clone())
<4> l2[1].weight = torch.nn.Parameter(l1[1].weight.clone())
<5> l2[1].bias = torch.nn.Parameter(l1[1].bias.clone())
<6> opt1 = bnb.optim.Adam8bit(l1.parameters(), lr=0.001)
<7> opt2 = bnb.optim.Adam8bit(l2.parameters(), lr=0.001)
<8>
<9> acc_steps = 10
<10>
<11> for i in range(10):
<12> b1 = torch.randn(16, 8, 32, device="cuda").half()
<13> o1 = l1(b1)
<14> o2 = l2(b1)
<15> loss1 = o1.mean()
<16> loss2 = o2.mean()
<17> loss1.backward()
<18> loss2.backward()
<19> if i == 2:
<20> assert l1[0].state.CxB is not None
<21> assert l1[1].state.CxB is not None
<22>
<23> print(i)
<24> if i > 0 and i % acc_steps == 0:
<25> opt1.step()
<26> opt1.zero_grad(True)
<27> opt2.step()
<28> opt2.zero_grad(True)
<29> assert_all_approx_close(
<30> l1[0].weight, l2[0].weight, rtol=1.05, atol=0.01, count=2</s>
|
===========below chunk 0===========
# module: tests.test_modules
def test_linear8bitlt_accumulated_gradient():
# offset: 1
assert_all_approx_close(
l1[1].weight, l2[1].weight, rtol=1.05, atol=0.01, count=2
)
# we do this copy because otherwise we have small divergences over time that add up
l1[0].weight.data.copy_(l2[0].weight.data)
l1[1].weight.data.copy_(l2[1].weight.data)
else:
torch.testing.assert_close(l1[0].weight.grad, l2[0].weight.grad)
torch.testing.assert_close(l1[1].weight.grad, l2[1].weight.grad)
===========unchanged ref 0===========
at: bitsandbytes.nn.modules
Linear8bitLt(input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None)
at: bitsandbytes.optim.adam
Adam32bit(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False)
at: bitsandbytes.optim.optimizer.Optimizer8bit
step(closure=None)
at: tests.test_modules
assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10)
===========unchanged ref 1===========
at: torch._C._VariableFunctions
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional</s>
===========unchanged ref 2===========
at: torch.nn.modules.container
Sequential(*args: Module)
Sequential(arg: 'OrderedDict[str, Module]')
at: torch.nn.modules.linear
Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None)
at: torch.nn.modules.module.Module
dump_patches: bool = False
_version: int = 1
training: bool
_parameters: Dict[str, Optional[Parameter]]
_buffers: Dict[str, Optional[Tensor]]
_non_persistent_buffers_set: Set[str]
_backward_pre_hooks: Dict[int, Callable]
_backward_hooks: Dict[int, Callable]
_is_full_backward_hook: Optional[bool]
_forward_hooks: Dict[int, Callable]
_forward_hooks_with_kwargs: Dict[int, bool]
_forward_hooks_always_called: Dict[int, bool]
_forward_pre_hooks: Dict[int, Callable]
_forward_pre_hooks_with_kwargs: Dict[int, bool]
_state_dict_hooks: Dict[int, Callable]
_load_state_dict_pre_hooks: Dict[int, Callable]
_state_dict_pre_hooks: Dict[int, Callable]
_load_state_dict_post_hooks: Dict[int, Callable]
_modules: Dict[str, Optional['Module']]
call_super_init: bool = False
_compiled_call_impl : Optional[Callable] = None
forward: Callable[..., Any] = _forward_unimplemented
cuda(device: Optional[Union[int, device]]=None) -> T
half() -> T
__call__ : Callable[..., Any] = _wrapped_call_impl
T_destination = TypeVar('T_destination', bound=Dict[str, Any])
parameters(recurse: bool=True) -> Iterator[Parameter]
===========unchanged ref 3===========
at: torch.optim.optimizer.Optimizer
OptimizerPreHook: TypeAlias = Callable[[Self, Args, Kwargs], Optional[Tuple[Args, Kwargs]]] # type: ignore[misc]
OptimizerPostHook: TypeAlias = Callable[[Self, Args, Kwargs], None] # type: ignore[misc]
_optimizer_step_pre_hooks: Dict[int, OptimizerPreHook]
_optimizer_step_post_hooks: Dict[int, OptimizerPostHook]
_optimizer_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]'
_optimizer_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]'
_optimizer_load_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]'
_optimizer_load_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]'
_disable_dynamo(fn=None)
inner(*args, fn, **kwargs)
|
tests.test_functional/test_bench_matmul
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
2bce175d156b5c5c1be925cb57fe33215675fafd
|
Fixed Makefile.
|
<0>:<add> iters = 80
<del> iters = 1
<12>:<add> linear8bit = bnb.nn.Linear8bitLt(model, hidden, False, False).cuda().half()
<del> linear8bit = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
<18>:<add> linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, False, threshold=6.0).cuda().half())
<del> linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half())
<19>:<add> #linearMixedBit.eval()
<del> linearMixedBit.eval()
|
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
<0> iters = 1
<1> formatB = F.get_special_format_str()
<2>
<3> A = torch.randn(batch, seq, model, device="cuda").half()
<4> B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
<5> torch.nn.init.xavier_uniform_(B)
<6>
<7> B_fp4, state = F.quantize_fp4(B)
<8> B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)
<9>
<10> B_nf4, state_nf4= F.quantize_nf4(B)
<11>
<12> linear8bit = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
<13> linear8bit.eval()
<14>
<15> outliers = torch.randint(0, model, size=(5,)).cuda()
<16> A[:, :, outliers] = 8.0
<17>
<18> linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half())
<19> linearMixedBit.eval()
<20>
<21> linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
<22> linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
<23>
<24> # warmup
<25> for i in range(iters):
<26> torch.matmul(A, B.t())
<27> torch.cuda.synchronize()
<28> print("")
<29>
<30> torch.cuda.synchronize()
<31> t0 = time.time()
<32> for i in range(iters):
<33> torch.matmul(A,</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 1
torch.cuda.synchronize()
print( f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul_4bit(A, B_fp4.t(), quant_state=state)
torch.cuda.synchronize()
print( f"bnb fp4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul_4bit(A, B_fp4.t(), quant_state=state_c)
torch.cuda.synchronize()
print( f"bnb fp4 + compressed stats: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
torch.cuda.synchronize()
print( f"bnb nf4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.autograd._functions
matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor=None, bias=None)
at: bitsandbytes.functional
get_special_format_str()
quantize_fp4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
quantize_nf4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
at: bitsandbytes.nn.modules
Linear8bitLt(input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None)
at: tests.test_functional
values = []
at: time
time() -> float
at: torch._C
float16: dtype = ...
===========unchanged ref 1===========
at: torch._C._VariableFunctions
empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor
===========unchanged ref 2===========
randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(high: _int, size: _size, *, generator: Optional[Generator]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator]=</s>
|
bitsandbytes.optim.lion/Lion.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
1b8772a8f33fdb47df0c849302cbb7e703571b8c
|
Added PagedLion and bf16 Lion.
|
<0>:<del> super().__init__(
<1>:<del> "lion",
<2>:<del> params,
<3>:<del> lr,
<4>:<del> betas,
<5>:<del> 0.,
<6>:<del> weight_decay,
<7>:<del> optim_bits,
<8>:<del> args,
<9>:<del> min_8bit_size,
<10>:<del> percentile_clipping,
<11>:<del> block_wise,
<12>:<del> )
<13>:<add> super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
<s>bits=32,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
<0> super().__init__(
<1> "lion",
<2> params,
<3> lr,
<4> betas,
<5> 0.,
<6> weight_decay,
<7> optim_bits,
<8> args,
<9> min_8bit_size,
<10> percentile_clipping,
<11> block_wise,
<12> )
<13>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer
Optimizer1State(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, is_paged=False)
at: bitsandbytes.optim.optimizer.Optimizer1State
__init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, is_paged=False)
__init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, is_paged=False)
|
bitsandbytes.optim.lion/Lion8bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
1b8772a8f33fdb47df0c849302cbb7e703571b8c
|
Added PagedLion and bf16 Lion.
|
<0>:<del> super().__init__(
<1>:<del> "lion",
<2>:<del> params,
<3>:<del> lr,
<4>:<del> betas,
<5>:<del> 0.,
<6>:<del> weight_decay,
<7>:<del> 8,
<8>:<del> args,
<9>:<del> min_8bit_size,
<10>:<del> percentile_clipping,
<11>:<del> block_wise,
<12>:<del> )
<13>:<add> super().__init__("lion", params, lr, betas, 0., weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
<s>),
- weight_decay=0,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
<0> super().__init__(
<1> "lion",
<2> params,
<3> lr,
<4> betas,
<5> 0.,
<6> weight_decay,
<7> 8,
<8> args,
<9> min_8bit_size,
<10> percentile_clipping,
<11> block_wise,
<12> )
<13>
|
===========changed ref 0===========
# module: bitsandbytes.optim.lion
+ class PagedLion32bit(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 1===========
# module: bitsandbytes.optim.lion
+ class PagedLion8bit(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 2===========
# module: bitsandbytes.optim.lion
+ class PagedLion(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 3===========
<s>bits=32,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "lion",
- params,
- lr,
- betas,
- 0.,
- weight_decay,
- optim_bits,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
bitsandbytes.optim.lion/Lion32bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
1b8772a8f33fdb47df0c849302cbb7e703571b8c
|
Added PagedLion and bf16 Lion.
|
<0>:<del> super().__init__(
<1>:<del> "lion",
<2>:<del> params,
<3>:<del> lr,
<4>:<del> betas,
<5>:<del> 0.,
<6>:<del> weight_decay,
<7>:<del> 32,
<8>:<del> args,
<9>:<del> min_8bit_size,
<10>:<del> percentile_clipping,
<11>:<del> block_wise,
<12>:<del> )
<13>:<add> super().__init__("lion", params, lr, betas, 0., weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
<s>),
- weight_decay=0,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
<0> super().__init__(
<1> "lion",
<2> params,
<3> lr,
<4> betas,
<5> 0.,
<6> weight_decay,
<7> 32,
<8> args,
<9> min_8bit_size,
<10> percentile_clipping,
<11> block_wise,
<12> )
<13>
|
===========changed ref 0===========
# module: bitsandbytes.optim.lion
+ class PagedLion32bit(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 1===========
# module: bitsandbytes.optim.lion
+ class PagedLion8bit(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 2===========
# module: bitsandbytes.optim.lion
+ class PagedLion(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 3===========
<s>),
- weight_decay=0,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "lion",
- params,
- lr,
- betas,
- 0.,
- weight_decay,
- 8,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 4===========
<s>bits=32,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "lion",
- params,
- lr,
- betas,
- 0.,
- weight_decay,
- optim_bits,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
tests.test_optim/assert_most_approx_close
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
1b8772a8f33fdb47df0c849302cbb7e703571b8c
|
Added PagedLion and bf16 Lion.
|
<0>:<add> idx = torch.isclose(a, b, rtol=rtol, atol=atol)
<del> idx = torch.isclose(a, b, rtol, atol)
<4>:<add> torch.testing.assert_close(a, b, rtol=rtol, atol=atol)
<del> torch.testing.assert_close(a, b, rtol, atol)
|
# module: tests.test_optim
def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):
<0> idx = torch.isclose(a, b, rtol, atol)
<1> error_count = (idx == 0).sum().item()
<2> if error_count > max_error_count:
<3> print(f"Too many values not close: assert {error_count} < {max_error_count}")
<4> torch.testing.assert_close(a, b, rtol, atol)
<5>
|
===========changed ref 0===========
# module: bitsandbytes.optim.lion
+ class PagedLion32bit(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 1===========
# module: bitsandbytes.optim.lion
+ class PagedLion8bit(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 2===========
# module: bitsandbytes.optim.lion
+ class PagedLion(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 3===========
<s>),
- weight_decay=0,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "lion",
- params,
- lr,
- betas,
- 0.,
- weight_decay,
- 32,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 4===========
<s>),
- weight_decay=0,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "lion",
- params,
- lr,
- betas,
- 0.,
- weight_decay,
- 8,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 5===========
<s>bits=32,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "lion",
- params,
- lr,
- betas,
- 0.,
- weight_decay,
- optim_bits,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 6===========
# module: bitsandbytes.functional
name2qmap = {}
if COMPILED_WITH_CUDA:
"""C FUNCTIONS FOR OPTIMIZERS"""
str2optimizer32bit = {}
str2optimizer32bit["adam"] = (lib.cadam32bit_grad_fp32, lib.cadam32bit_grad_fp16, lib.cadam32bit_grad_bf16)
str2optimizer32bit["momentum"] = (
lib.cmomentum32bit_grad_32,
lib.cmomentum32bit_grad_16,
)
str2optimizer32bit["rmsprop"] = (
lib.crmsprop32bit_grad_32,
lib.crmsprop32bit_grad_16,
)
- str2optimizer32bit["lion"] = (
- lib.clion32bit_grad_32,
- lib.clion32bit_grad_16,
- )
+ str2optimizer32bit["lion"] = (lib.clion32bit_grad_fp32, lib.clion32bit_grad_fp16, lib.clion32bit_grad_bf16)
str2optimizer32bit["adagrad"] = (
lib.cadagrad32bit_grad_32,
lib.cadagrad32bit_grad_16,
)
str2optimizer8bit = {}
str2optimizer8bit["adam"] = (
lib.cadam_static_8bit_grad_32,
lib.cadam_static_8bit_grad_16,
)
str2optimizer8bit["momentum"] = (
lib.cmomentum_static_8bit_grad_32,
lib.cmomentum_static_8bit_grad_16,
)
str2optimizer8bit["rmsprop"] = (
lib.crmsprop_static_8bit_grad_32,
lib.crmsprop_static_8bit_grad_16,
)
str2optimizer8bit["lion"] = (
lib.clion</s>
|
tests.test_optim/test_optimizer32bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
1b8772a8f33fdb47df0c849302cbb7e703571b8c
|
Added PagedLion and bf16 Lion.
|
<0>:<add> if gtype == torch.bfloat16 and optim_name in ['momentum', 'rmsprop']: pytest.skip()
|
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
<0> if dim1 == 1 and dim2 == 1:
<1> return
<2> p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
<3> p2 = p1.clone()
<4> p1 = p1.float()
<5>
<6> torch_optimizer = str2optimizers[optim_name][0]([p1])
<7> bnb_optimizer = str2optimizers[optim_name][1]([p2])
<8>
<9> if gtype == torch.float32:
<10> atol, rtol = 1e-6, 1e-5
<11> elif gtype == torch.bfloat16:
<12> atol, rtol = 1e-3, 1e-2
<13> else:
<14> atol, rtol = 1e-4, 1e-3
<15>
<16> for i in range(k):
<17> g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
<18> p1.grad = g.clone().float()
<19> p2.grad = g.clone()
<20>
<21> bnb_optimizer.step()
<22> torch_optimizer.step()
<23>
<24>
<25> for name1, name2 in str2statenames[optim_name]:
<26> torch.testing.assert_close(
<27> torch_optimizer.state[p1][name1],
<28> bnb_optimizer.state[p2][name2].cuda(),
<29> atol=atol,
<30> rtol=rtol,
<31> )
<32>
<33> # since Lion can have pretty noisy updates where things lie at the boundary
<34> # allow up to 10 errors for Lion
<35> assert_most_approx_close(p1, p2.float(), atol, rtol, max_error_count=10)
<36>
<37> </s>
|
===========below chunk 0===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
# offset: 1
path = get_temp_dir()
torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
del bnb_optimizer
bnb_optimizer = None
bnb_optimizer = str2optimizers[optim_name][1]([p2])
bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
rm_path(path)
# since Lion can have pretty noisy updates where things lie at the boundary
# allow up to 10 errors for Lion
assert_most_approx_close(p1, p2.float(), atol, rtol, max_error_count=10)
for name1, name2 in str2statenames[optim_name]:
# since Lion can have pretty noisy updates where things lie at the boundary
# allow up to 10 errors for Lion
assert_most_approx_close(torch_optimizer.state[p1][name1], bnb_optimizer.state[p2][name2],
atol=atol, rtol=rtol,
max_error_count=10)
if gtype != torch.float32:
# the adam buffers should also be close because they are 32-bit
    # but the parameters can diverge because they are 16-bit
    # the differences grow larger and larger with each update
# --> copy the state to keep weights close
p1.data = p1.data.to(p2.dtype).float()
p2.copy_(p1.data)
torch.testing.assert_close(p1.to(p2.dtype), p2)
if optim_name in ["lars", "lamb"]:
assert bnb_optimizer.state[p2]["unorm_vec"] > 0.0
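The save/load block above boils down to an ordinary state_dict round trip; as a standalone sketch (temporary path and shapes illustrative):

    import os, tempfile
    import torch
    import bitsandbytes as bnb

    p = torch.nn.Parameter(torch.randn(4096, 4096, device='cuda'))
    opt = bnb.optim.Adam8bit([p], lr=1e-3)
    p.grad = torch.randn_like(p) * 0.01
    opt.step()                                        # creates the quantized state tensors

    path = os.path.join(tempfile.mkdtemp(), 'opt.pt')
    torch.save(opt.state_dict(), path)

    opt = bnb.optim.Adam8bit([p], lr=1e-3)            # fresh optimizer over the same param
    opt.load_state_dict(torch.load(path))             # restores state, qmap and absmax buffers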
===========changed ref 0===========
# module: tests.test_optim
def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):
+ idx = torch.isclose(a, b, rtol=rtol, atol=atol)
- idx = torch.isclose(a, b, rtol, atol)
error_count = (idx == 0).sum().item()
if error_count > max_error_count:
print(f"Too many values not close: assert {error_count} < {max_error_count}")
+ torch.testing.assert_close(a, b, rtol=rtol, atol=atol)
- torch.testing.assert_close(a, b, rtol, atol)
===========changed ref 1===========
# module: bitsandbytes.optim.lion
+ class PagedLion32bit(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 2===========
# module: bitsandbytes.optim.lion
+ class PagedLion8bit(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 3===========
# module: bitsandbytes.optim.lion
+ class PagedLion(Optimizer1State):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
+
===========changed ref 4===========
<s>),
- weight_decay=0,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "lion",
- params,
- lr,
- betas,
- 0.,
- weight_decay,
- 32,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 5===========
<s>),
- weight_decay=0,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "lion",
- params,
- lr,
- betas,
- 0.,
- weight_decay,
- 8,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__("lion", params, lr, betas, 0., weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
===========changed ref 6===========
<s>bits=32,
- args=None,
- min_8bit_size=4096,
- percentile_clipping=100,
- block_wise=True,
- ):
+ def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False):
- super().__init__(
- "lion",
- params,
- lr,
- betas,
- 0.,
- weight_decay,
- optim_bits,
- args,
- min_8bit_size,
- percentile_clipping,
- block_wise,
- )
+ super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged)
|
tests.test_optim/test_optimizer8bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
1b8772a8f33fdb47df0c849302cbb7e703571b8c
|
Added PagedLion and bf16 Lion.
|
<0>:<add> if gtype == torch.bfloat16 and optim_name not in ['adam8bit_blockwise', 'lion8bit_blockwise']: pytest.skip()
<del> if gtype == torch.bfloat16 and optim_name not in str2bf16support: return
|
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
<0> if gtype == torch.bfloat16 and optim_name not in str2bf16support: return
<1> if dim1 == 1 and dim2 == 1:
<2> return
<3> p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
<4> p2 = p1.clone()
<5> p1 = p1.float()
<6> blocksize = 2048
<7>
<8> torch_optimizer = str2optimizers[optim_name][0]([p1])
<9> bnb_optimizer = str2optimizers[optim_name][1]([p2])
<10>
<11> if gtype == torch.float32:
<12> atol, rtol = 3e-3, 1e-3
<13> patol, prtol = 1e-5, 1e-3
<14> elif gtype == torch.bfloat16:
<15> atol, rtol = 3e-3, 1e-3
<16> patol, prtol = 1e-4, 1e-2
<17> else:
<18> atol, rtol = 3e-3, 1e-3
<19> patol, prtol = 1e-5, 1e-3
<20>
<21> errors = []
<22> relerrors = []
<23>
<24> for i in range(100):
<25> g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
<26> p1.grad = g.clone().float()
<27> p2.grad = g.clone()
<28>
<29> bnb_optimizer.step()
<30> torch_optimizer.step()
<31>
<32> # since Lion can have pretty noisy updates where things lie at the boundary
<33> # allow up to 5 errors for Lion
<34> assert_most_approx_close(p1, p2.float(), patol, prtol, max_</s>
|
===========below chunk 0===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
# offset: 1
dequant_states = []
for name1, name2, qmap, max_val in str2statenames[optim_name]:
# print(bnb_optimizer.state[p2][max_val], name1)
if "blockwise" in optim_name:
s1 = F.dequantize_blockwise(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
blocksize=blocksize,
)
else:
s1 = F.dequantize(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
)
num_not_close = (
torch.isclose(
torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol
)
== 0
)
#assert num_not_close.sum().item() < 20
dequant_states.append(s1.clone())
err = torch.abs(p1 - p2)
relerr = err / (torch.abs(p1)+1e-9)
if g.dtype == torch.bfloat16:
assert err.mean() < 0.00015
assert relerr.mean() < 0.0016
else:
assert err.mean() < 0.00012
assert relerr.mean() < 0.0012
errors.append(err.mean().item())
relerrors.append(relerr.mean().item())
if i % 10 == 0 and i > 0:
for (</s>
===========below chunk 1===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
# offset: 2
<s> relerrors.append(relerr.mean().item())
if i % 10 == 0 and i > 0:
for (name1, name2, qmap, max_val), s in zip(
str2statenames[optim_name], dequant_states
):
s1cpy = s.clone()
raws1cpy = bnb_optimizer.state[p2][name2].clone()
qmap1 = bnb_optimizer.state[p2][qmap].clone()
path = get_temp_dir()
torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
del bnb_optimizer
bnb_optimizer = None
bnb_optimizer = str2optimizers[optim_name][1]([p2])
bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
rm_path(path)
torch.testing.assert_close(raws1cpy, bnb_optimizer.state[p2][name2])
torch.testing.assert_close(qmap1, bnb_optimizer.state[p2][qmap])
if "blockwise" in optim_name:
s1 = F.dequantize_blockwise(
code=bnb_optimizer.state[p2][qmap],
absmax=bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
blocksize=blocksize,
)
else:
s1 = F.dequantize(
code=bnb_optimizer.state[p2][qmap],
abs</s>
===========below chunk 2===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer8bit(dim1, dim2, gtype, optim_name):
# offset: 3
<s>bnb_optimizer.state[p2][max_val],
A=bnb_optimizer.state[p2][name2],
)
torch.testing.assert_close(s1cpy, s1)
num_not_close = (torch.isclose(torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol) == 0)
assert num_not_close.sum().item() < 20
# since Lion can have pretty noisy updates where things lie at the boundary
# allow up to 5 errors for Lion
assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=5)
# the parameters diverge quickly. Here we keep them close
# together so we can test against the Adam error
p1.data = p1.data.to(gtype).float()
p2.copy_(p1.data)
torch.testing.assert_close(p1.to(gtype), p2)
for (name1, name2, qmap, max_val), s in zip(str2statenames[optim_name], dequant_states):
torch_optimizer.state[p1][name1].copy_(s.data)
===========changed ref 0===========
# module: tests.test_optim
def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):
+ idx = torch.isclose(a, b, rtol=rtol, atol=atol)
- idx = torch.isclose(a, b, rtol, atol)
error_count = (idx == 0).sum().item()
if error_count > max_error_count:
print(f"Too many values not close: assert {error_count} < {max_error_count}")
+ torch.testing.assert_close(a, b, rtol=rtol, atol=atol)
- torch.testing.assert_close(a, b, rtol, atol)
|
tests.test_functional/test_bench_4bit_dequant
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
b7f04e2a2064575d0c636a89d98a7075c46151e1
|
Added lookup table.
|
<12>:<add> iters = 100
<del> iters = 5
|
<s> blocksize, quant_type)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
+ #@pytest.mark.parametrize("quant_type", ['fp4', 'nf4'])
- @pytest.mark.parametrize("quant_type", ['fp4', 'nf4'])
+ @pytest.mark.parametrize("quant_type", ['nf4'])
def test_bench_4bit_dequant(quant_type):
<0> blocksize = 256
<1> a = torch.rand(1024*12*4, 1024*12, device='cuda').half()
<2> qa, SA = F.quantize_4bit(a, blocksize=blocksize, quant_type=quant_type)
<3>
<4> input_size = a.numel()/2
<5> output_size = a.numel()*2
<6> num_bytes = input_size+output_size
<7> GB = num_bytes/1e9
<8> max_theoretical_s = GB/768
<9> #print(max_theoretical_s*1e6)
<10> b = torch.randn(128, 1024*12, device='cuda').half()
<11>
<12> iters = 5
<13> torch.cuda.synchronize()
<14> t0 = time.time()
<15> for i in range(iters):
<16> F.dequantize_4bit(qa, SA, blocksize=blocksize, quant_type=quant_type)
<17> #b.copy_(a)
<18> torch.cuda.synchronize()
<19>
| |
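The benchmark above derives a bandwidth-bound lower limit on the dequantization time: it counts the packed 4-bit read plus the fp16 write and divides by an assumed 768 GB/s of device memory bandwidth (a figure chosen in the test, not a universal constant). A standalone sketch of that arithmetic:

# Rough arithmetic behind max_theoretical_s in the benchmark above
# (assumes ~768 GB/s memory bandwidth as in the test; adjust for your GPU).
numel = 1024 * 12 * 4 * 1024 * 12          # elements in the benchmarked weight matrix
input_bytes = numel / 2                    # packed 4-bit input: half a byte per element
output_bytes = numel * 2                   # fp16 output: two bytes per element
gb_moved = (input_bytes + output_bytes) / 1e9
max_theoretical_s = gb_moved / 768         # time if the kernel were purely bandwidth-bound
print(f"{max_theoretical_s * 1e6:.1f} us per dequantization at 768 GB/s")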
tests.test_functional/test_gemm_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
b7f04e2a2064575d0c636a89d98a7075c46151e1
|
Added lookup table.
|
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
<0> #for dim in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
<1> #for dim in [4096, 5120, 6656, 8192]:
<2> #for dim in [32]:
<3> for dim in [4096]:
<4> errs = []
<5> relerrs = []
<6> max_err = 0
<7> max_relerr = 0
<8> for i in range(1):
<9> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<10> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<11> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<12> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<13> A = torch.randn(1, dim+0, dtype=dtype, device='cuda')
<14> B = torch.randn(4*dim, dim+0, dtype=dtype, device='cuda')/math.sqrt(dim)
<15>
<16> #print('')
<17> #print(A)
<18> #print(B.t())
<19> #A[:, :-1] = 0
<20> #B[:, :-1] = 0
<21>
<22> qB, state = F.quantize_nf4(B)
<23> F.dequantize_nf4(qB, state)
<24>
<25> C3 = torch.matmul(A, B.t())
<26> C2 = F.cutlass3_gemm(A, qB.t(), state=state)
<27> C1 = bnb.matmul_4bit(A, qB.t(), state)</s>
|
===========below chunk 0===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
# offset: 1
print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2)
mag = torch.abs(C1)+1e-8
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
errs.append(err)
relerrs.append(relerr)
if err/torch.abs(C1).mean() > 5e-5 or err > 3.2e-5:
print('')
print(i, err, relerr)
print(A.flatten()[-6:])
print(B.flatten()[-6:])
out = A.flatten()[-6:]*B.flatten()[-6:]
print(out)
print(out[:-1].sum())
print('='*80)
print(C1.flatten()[-6:])
print(C2.flatten()[-6:])
#assert False, 'ERROR'
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print(c/math.sqrt(dim))
print('')
print(dim, sum(errs)/len(errs)/math.sqrt(dim))
print(dim, sum(relerrs)/</s>
===========below chunk 1===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
# offset: 2
<s>dim, sum(errs)/len(errs)/math.sqrt(dim))
print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
print(dim, (max_err.item(), max_relerr.item()))
===========changed ref 0===========
<s> blocksize, quant_type)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
+ #@pytest.mark.parametrize("quant_type", ['fp4', 'nf4'])
- @pytest.mark.parametrize("quant_type", ['fp4', 'nf4'])
+ @pytest.mark.parametrize("quant_type", ['nf4'])
def test_bench_4bit_dequant(quant_type):
blocksize = 256
a = torch.rand(1024*12*4, 1024*12, device='cuda').half()
qa, SA = F.quantize_4bit(a, blocksize=blocksize, quant_type=quant_type)
input_size = a.numel()/2
output_size = a.numel()*2
num_bytes = input_size+output_size
GB = num_bytes/1e9
max_theoretical_s = GB/768
#print(max_theoretical_s*1e6)
b = torch.randn(128, 1024*12, device='cuda').half()
+ iters = 100
- iters = 5
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
F.dequantize_4bit(qa, SA, blocksize=blocksize, quant_type=quant_type)
#b.copy_(a)
torch.cuda.synchronize()
|
|
tests.test_functional/test_gemm_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
e54d2730fc033489be1ee61dab5ac5e22f798527
|
Added debugging functions.
|
<3>:<add> for dim in [32]:
<del> for dim in [4096]:
|
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
<0> #for dim in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
<1> #for dim in [4096, 5120, 6656, 8192]:
<2> #for dim in [32]:
<3> for dim in [4096]:
<4> errs = []
<5> relerrs = []
<6> max_err = 0
<7> max_relerr = 0
<8> for i in range(1):
<9> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<10> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<11> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<12> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<13> A = torch.randn(1, dim+0, dtype=dtype, device='cuda')
<14> B = torch.randn(4*dim, dim+0, dtype=dtype, device='cuda')/math.sqrt(dim)
<15>
<16> #print('')
<17> #print(A)
<18> #print(B.t())
<19> #A[:, :-1] = 0
<20> #B[:, :-1] = 0
<21>
<22> qB, state = F.quantize_nf4(B)
<23> F.dequantize_nf4(qB, state)
<24>
<25> C3 = torch.matmul(A, B.t())
<26> C2 = F.cutlass3_gemm(A, qB.t(), state=state)
<27> C1 = bnb.matmul_4bit(A, qB.t(), state)</s>
|
===========below chunk 0===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
# offset: 1
print(C1)
print(C2)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2)
mag = torch.abs(C1)+1e-8
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
print(err)
errs.append(err)
relerrs.append(relerr)
if err/torch.abs(C1).mean() > 5e-5 or err > 3.2e-5:
print('')
print(i, err, relerr)
print(A.flatten()[-6:])
print(B.flatten()[-6:])
out = A.flatten()[-6:]*B.flatten()[-6:]
print(out)
print(out[:-1].sum())
print('='*80)
print(C1.flatten()[-6:])
print(C2.flatten()[-6:])
#assert False, 'ERROR'
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print(c/math.sqrt(dim))
print('')
print(dim, sum(errs)/len(</s>
===========below chunk 1===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
# offset: 2
<s> #print(c/math.sqrt(dim))
print('')
print(dim, sum(errs)/len(errs)/math.sqrt(dim))
print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
print(dim, (max_err.item(), max_relerr.item()))
|
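The gemm tests above compare against mean absolute and mean relative error (with a small outlier budget) rather than exact equality, because tensor-core GEMMs are non-deterministic. A small sketch of those metrics with simulated kernel noise; the thresholds and noise level here are illustrative only:

import torch

def summarize_error(C_ref, C_test, eps=1e-8):
    # mean absolute error, mean relative error, and worst-case absolute error
    err = (C_ref - C_test).abs()
    relerr = err / (C_ref.abs() + eps)
    return err.mean().item(), relerr.mean().item(), err.max().item()

C_ref = torch.randn(1, 16384)
C_test = C_ref + 1e-4 * torch.randn_like(C_ref)   # stand-in for tensor-core jitter
print(summarize_error(C_ref, C_test))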
bitsandbytes.functional/cutlass3_gemm
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
f89ff93e26d02037db30e88053983d6bb12dd660
|
Initial 4-bit naive batch size 1, 81 vs 185.
|
# module: bitsandbytes.functional
def cutlass3_gemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
<0> #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
<1> if state is None:
<2> Bshape = B.shape
<3> bout = Bshape[1]
<4> else:
<5> Bshape = state[1]
<6> bout = Bshape[0]
<7> if out is None:
<8> out = torch.zeros(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<9>
<10> sA = A.shape
<11> sB = B.shape
<12> if transposed_A and len(sA) == 2:
<13> sA = (sA[1], sA[0])
<14> elif transposed_A and len(sA) == 3:
<15> sA = (sA[0], sA[2], sA[0])
<16> if transposed_B and len(sB) == 2:
<17> sB = (sB[1], sB[0])
<18> elif transposed_B and len(sB) == 3:
<19> sB = (sB[0], sB[2], sB[0])
<20> # this is a mess: cuBLAS expect column major, but PyTorch is row major.
<21> # So to perform the matrix multiplication, we have to treat A, B, and C matrices
<22> # (transpose of row major is column major)
<23> # This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these
<24>
<25> # matrices in the input arguments for cuBLAS
<26> # column major: A @ B = C: [m, k] @ [k, n] = [m, n]
<27> # row major: B^T @ A^T = C^T: [</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def cutlass3_gemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 1
# column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m]
if len(sB) == 2:
if B.stride()[0] == B.shape[1]:
transposed_B = False
elif B.stride()[1] == B.shape[0]:
transposed_B = True
if len(A.shape) == 2:
if A.stride()[0] == A.shape[1]:
transposed_A = False
elif A.stride()[1] == A.shape[0]:
transposed_A = True
else:
if A.stride()[1] == A.shape[2]:
transposed_A = False
elif A.stride()[2] == A.shape[1]:
transposed_A = True
if len(sA) == 2:
n = sA[0]
ldb = A.stride()[1 if transposed_A else 0]
elif len(sA) == 3 and len(sB) == 2:
n = sA[0] * sA[1]
ldb = sA[2]
m = sB[1]
k = sB[0]
lda = B.stride()[0]
ldc = sB[1]
elif len(sB) == 3:
# special case
assert len(sA) == 3
if not (sA[0] == sB[0] and sA[1] == sB[1]):
raise ValueError(
f"Only bsi,bso->io supported for tensor contractions, but dims for A x B were: {sA} x {sB}"
)
trans</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def cutlass3_gemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 2
<s> supported for tensor contractions, but dims for A x B were: {sA} x {sB}"
)
transposed_A = True
transposed_B = False
m = sB[2]
n = sA[2]
k = sB[0] * sB[1]
lda = n
ldb = sA[2]
ldc = m
ptr = CUBLAS_Context.get_instance().get_context(A.device)
# B^T @ A^T = C^T
# [km, nk -> mn]
#lda = ldb = ldc = 1
#lda = 1
if state is not None:
m = Bshape[0]
k = Bshape[1]
lda = Bshape[0]
ldc = Bshape[0]
ldb = (ldb+1)//2
#print(m, n, k, lda, ldb, ldc)
is_on_gpu([B, A, out])
m = ct.c_int32(m)
n = ct.c_int32(n)
k = ct.c_int32(k)
lda = ct.c_int32(lda)
ldb = ct.c_int32(ldb)
ldc = ct.c_int32(ldc)
if B.dtype == torch.uint8:
lib.cgemm_4bit_inference(m, n, k, get_ptr(A), get_ptr(B), get_ptr(state[0]), get_ptr(out), lda,</s>
===========below chunk 2===========
# module: bitsandbytes.functional
def cutlass3_gemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 3
<s>, ldc, ct.c_int32(state[3]))
elif A.dtype == torch.float32:
lib.cgemm_host_fp32(m, n, k, get_ptr(A), get_ptr(B), get_ptr(out), lda, ldb, ldc)
elif A.dtype == torch.float16:
lib.cgemm_host_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(out), lda, ldb, ldc)
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
return out
|
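The comments inside cutlass3_gemm above describe the usual layout trick: cuBLAS expects column-major storage while PyTorch tensors are row-major, and a row-major matrix reinterpreted as column-major is its transpose, so the code asks the column-major GEMM for B^T A^T = C^T and reads the result back as row-major C. A tiny sketch demonstrating the identity (illustration only, plain PyTorch, no cuBLAS):

import torch

m, k, n = 3, 4, 5
A = torch.randn(m, k)
B = torch.randn(k, n)
C = A @ B
C_via_transposes = (B.t() @ A.t()).t()    # B^T A^T = C^T, then transpose back
assert torch.allclose(C, C_via_transposes, atol=1e-5)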
|
tests.test_functional/test_bench_matmul
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
f89ff93e26d02037db30e88053983d6bb12dd660
|
Initial 4-bit naive batch size 1, 81 vs 185.
|
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
<0> iters = 80
<1> formatB = F.get_special_format_str()
<2>
<3> A = torch.randn(batch, seq, model, device="cuda").half()
<4> B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
<5> torch.nn.init.xavier_uniform_(B)
<6>
<7> B_fp4, state = F.quantize_fp4(B)
<8> B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)
<9>
<10> B_nf4, state_nf4= F.quantize_nf4(B)
<11>
<12> linear8bit = bnb.nn.Linear8bitLt(model, hidden, False, False).cuda().half()
<13> linear8bit.eval()
<14>
<15> outliers = torch.randint(0, model, size=(5,)).cuda()
<16> A[:, :, outliers] = 8.0
<17>
<18> linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, False, threshold=6.0).cuda().half())
<19> #linearMixedBit.eval()
<20>
<21> linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
<22> linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
<23>
<24> # warmup
<25> for i in range(iters):
<26> torch.matmul(A, B.t())
<27> torch.cuda.synchronize()
<28> print("")
<29>
<30> torch.cuda.synchronize()
<31> t0 = time.time()
<32> for i in range(iters):
<33> torch.</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 1
torch.cuda.synchronize()
print( f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul_4bit(A, B_fp4.t(), quant_state=state)
torch.cuda.synchronize()
print( f"bnb fp4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul_4bit(A, B_fp4.t(), quant_state=state_c)
torch.cuda.synchronize()
print( f"bnb fp4 + compressed stats: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
torch.cuda.synchronize()
print( f"bnb nf4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
#torch.cuda.synchronize()
</s>
===========below chunk 1===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 2
<s>time.time()-t0:.4f}s" )
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# bnb.matmul(A, B)
#torch.cuda.synchronize()
#print(f"CB -> CxB conversion (each iteration): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# bnb.matmul(A, B, threshold=6.0)
#torch.cuda.synchronize()
#print(f"CB -> CxB conversion + threshold: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
#CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A, threshold=0.0)
#C32A, SA = F.transform(CA, "col32")
#CB, CBt, SCB, SCBt, coo_tensorB = F.double_quant(B)
#CxB, SB = F.transform(CB, to_order=formatB)
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# out32, Sout32 = F.igemmlt(C32A, CxB, SA</s>
===========below chunk 2===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 3
<s>B)
#torch.cuda.synchronize()
#print(f"no overhead matmul-lt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
#BA, statsB = F.vectorwise_quant(B, dim=1)
#CxB, SB = F.nvidia_transform(CB, to_order=formatB)
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# A2 = A.view(-1, A.shape[-1]).contiguous()
# CA, statsA = F.vectorwise_quant(A2, dim=1)
# C32A, SA = F.nvidia_transform(CA, "col32")
# out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
# Cout, Sout = F.nvidia_transform(out32, "row", state=Sout32)
# F.vectorwise_mm_dequant(Cout, statsA, statsB.t())
#torch.cuda.synchronize()
#print(f"vector pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
#BA, statsB = F.vectorwise_quant(B, dim=1, quant_type="linear")
#CxB, SB = F.nvidia_transform(</s>
===========below chunk 3===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 4
<s> to_order=formatB)
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# A2 = A.view(-1, A.shape[-1]).contiguous()
# CA, statsA = F.vectorwise_quant(A2, dim=1, quant_type="linear")
# C32A, SA = F.nvidia_transform(CA, "col32")
# out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)
# Cout, Sout = F.nvidia_transform(out32, "row", state=Sout32)
# out = Cout * statsB * statsA * (1.0 / (127 * 127))
#torch.cuda.synchronize()
#print(f"linear pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
linear8bit(A)
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
linear8bit(A)
torch.cuda.synchronize()
print( f"bnb linear8bitlt (eval): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
linearMixedBit(A)
torch.cuda.synchronize()
t0 = time.time()
for i in range(</s>
|
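The benchmark record above follows the standard CUDA timing pattern: warm up first, then bracket the timed loop with torch.cuda.synchronize() because kernel launches are asynchronous. A generic sketch of that pattern (the helper name and iteration counts are illustrative, not part of the test):

import time
import torch

def bench(fn, iters=80, warmup=10):
    # warm up so lazy initialization and caching do not pollute the measurement
    for _ in range(warmup):
        fn()
    torch.cuda.synchronize()
    t0 = time.time()
    for _ in range(iters):
        fn()
    torch.cuda.synchronize()                 # wait for all queued kernels before stopping the clock
    return (time.time() - t0) / iters

# example (CUDA tensors A, B): bench(lambda: torch.matmul(A, B.t()))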
|
tests.test_functional/test_gemm_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
f89ff93e26d02037db30e88053983d6bb12dd660
|
Initial 4-bit naive batch size 1, 81 vs 185.
|
<0>:<add> print('')
<3>:<add> for dim in [4096]:
<add> #for dim in [5120]:
<add> #for dim in [6656]:
<add> #for dim in [128]:
<del> for dim in [32]:
<13>:<add> A = torch.randn(1, dim+2, dtype=dtype, device='cuda')
<del> A = torch.randn(1, dim+0, dtype=dtype, device='cuda')
<14>:<add> B = torch.randn(4*dim, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
<del> B = torch.randn(4*dim, dim+0, dtype=dtype, device='cuda')/math.sqrt(dim)
<15>:<add> #B = torch.randn(1, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
<21>:<add> #A.flatten()[:-1] = 0
<add> #B.flatten()[:-1] = 0
<25>:<add> #C3 = torch.matmul(A, B.t())
<del> C3 = torch.matmul(A, B.t())
|
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
<0> #for dim in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
<1> #for dim in [4096, 5120, 6656, 8192]:
<2> #for dim in [32]:
<3> for dim in [32]:
<4> errs = []
<5> relerrs = []
<6> max_err = 0
<7> max_relerr = 0
<8> for i in range(1):
<9> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<10> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<11> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<12> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<13> A = torch.randn(1, dim+0, dtype=dtype, device='cuda')
<14> B = torch.randn(4*dim, dim+0, dtype=dtype, device='cuda')/math.sqrt(dim)
<15>
<16> #print('')
<17> #print(A)
<18> #print(B.t())
<19> #A[:, :-1] = 0
<20> #B[:, :-1] = 0
<21>
<22> qB, state = F.quantize_nf4(B)
<23> F.dequantize_nf4(qB, state)
<24>
<25> C3 = torch.matmul(A, B.t())
<26> C2 = F.cutlass3_gemm(A, qB.t(), state=state)
<27> C1 = bnb.matmul_4bit(A, qB.t(), state)
<28> </s>
|
===========below chunk 0===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
# offset: 1
print(C2)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2)
mag = torch.abs(C1)+1e-8
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
print(err)
errs.append(err)
relerrs.append(relerr)
if err/torch.abs(C1).mean() > 5e-5 or err > 3.2e-5:
print('')
print(i, err, relerr)
print(A.flatten()[-6:])
print(B.flatten()[-6:])
out = A.flatten()[-6:]*B.flatten()[-6:]
print(out)
print(out[:-1].sum())
print('='*80)
print(C1.flatten()[-6:])
print(C2.flatten()[-6:])
#assert False, 'ERROR'
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print(c/math.sqrt(dim))
print('')
print(dim, sum(errs)/len(errs)/math.sqrt(</s>
===========below chunk 1===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
# offset: 2
<s>.sqrt(dim))
print('')
print(dim, sum(errs)/len(errs)/math.sqrt(dim))
print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
print(dim, (max_err.item(), max_relerr.item()))
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
parametrize: _ParametrizeMarkDecorator
at: bitsandbytes.autograd._functions
matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor=None, bias=None)
at: bitsandbytes.functional
quantize_nf4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor]=None, absmax: Tensor=None, out: Tensor=None, blocksize: int=64) -> Tensor
cutlass3_gemm(A: Tensor, B: Tensor, out: Tensor=None, transposed_A=False, transposed_B=False, state=None)
at: math
sqrt(x: SupportsFloat, /) -> float
at: torch._C
float16: dtype = ...
at: torch._C._VariableFunctions
abs(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor
===========unchanged ref 1===========
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device:</s>
|
tests.test_functional/test_gemm_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dfe6900b94a0b38c649ea39b2dd12392c835195f
|
Vectorized loads, conflict free NF4; 52 vs 172.
|
<4>:<add> for dim in [2*4096]:
<del> for dim in [4096]:
<7>:<add> #for dim in [4]:
<del> #for dim in [128]:
<12>:<add> for i in range(100):
<del> for i in range(1):
<17>:<add> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<del> A = torch.randn(1, dim+2, dtype=dtype, device='cuda')
<18>:<add> B = torch.randn(4*dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<del> B = torch.randn(4*dim, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
|
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
<0> print('')
<1> #for dim in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
<2> #for dim in [4096, 5120, 6656, 8192]:
<3> #for dim in [32]:
<4> for dim in [4096]:
<5> #for dim in [5120]:
<6> #for dim in [6656]:
<7> #for dim in [128]:
<8> errs = []
<9> relerrs = []
<10> max_err = 0
<11> max_relerr = 0
<12> for i in range(1):
<13> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<14> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<15> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<16> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<17> A = torch.randn(1, dim+2, dtype=dtype, device='cuda')
<18> B = torch.randn(4*dim, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
<19> #B = torch.randn(1, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
<20>
<21> #print('')
<22> #print(A)
<23> #print(B.t())
<24> #A[:, :-1] = 0
<25> #B[:, :-1] = 0
<26> #A.flatten()[:-1] = 0
<27> #B.flatten()[:-1] = 0
<28>
<29> qB, state</s>
|
===========below chunk 0===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
# offset: 1
F.dequantize_nf4(qB, state)
#C3 = torch.matmul(A, B.t())
C2 = F.cutlass3_gemm(A, qB.t(), state=state)
C1 = bnb.matmul_4bit(A, qB.t(), state)
#print(state)
#print(qB)
#print('')
#print(A)
#print(B)
#print('='*89)
#print(C1)
#print(C2)
#print(C3)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2)
mag = torch.abs(C1)+1e-8
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
#print(err)
errs.append(err)
relerrs.append(relerr)
if err/torch.abs(C1).mean() > 5e-5 or err > 3.2e-5:
print('')
print(i, err, relerr)
#print(A.flatten()[-6:])
#print(B.flatten()[-6:])
#out = A.flatten()[-6:]*B.flatten()[-6:]
#print(out)
</s>
===========below chunk 1===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
# offset: 2
<s>:])
#out = A.flatten()[-6:]*B.flatten()[-6:]
#print(out)
#print(out[:-1].sum())
print('='*80)
#print(C1.flatten()[-6:])
#print(C2.flatten()[-6:])
#assert False, 'ERROR'
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
print(c/math.sqrt(dim))
print('')
print(dim, sum(errs)/len(errs)/math.sqrt(dim))
print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
print(dim, (max_err.item(), max_relerr.item()))
|
bitsandbytes.autograd._functions/matmul_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
02fd80cb814285984415fd903278b8217c18c4df
|
Added bfloat16 quantizations and tests.
|
<1>:<add> if A.numel() == A.shape[-1] and A.requires_grad == False:
<add> return F.cutlass3_gemm(A, B.t(), out, state=quant_state)
<add> else:
<add> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<del> return MatMul4Bit.apply(A, B, out, bias, quant_state)
|
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
<0> assert quant_state is not None
<1> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<2>
|
===========unchanged ref 0===========
at: bitsandbytes.autograd._functions
tensor = torch.Tensor
at: torch._tensor.Tensor.__setstate__
self.requires_grad, _, self._backward_hooks = state
at: typing
List = _alias(list, 1, inst=False, name='List')
|
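The ground-truth hunk above routes single-row, inference-only inputs to the fused 4-bit GEMV path (F.cutlass3_gemm) and keeps the autograd MatMul4Bit path otherwise. A sketch of just the dispatch condition (simplified, illustration only): A.numel() == A.shape[-1] holds exactly when the input collapses to one row, i.e. batch size 1.

import torch

def is_single_row_inference(A: torch.Tensor) -> bool:
    # one row of features, and no gradient needed -> eligible for the fused GEMV kernel
    return A.numel() == A.shape[-1] and not A.requires_grad

assert is_single_row_inference(torch.randn(1, 1, 4096))
assert not is_single_row_inference(torch.randn(2, 4096))
assert not is_single_row_inference(torch.randn(1, 4096, requires_grad=True))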
bitsandbytes.functional/quantize_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
02fd80cb814285984415fd903278b8217c18c4df
|
Added bfloat16 quantizations and tests.
|
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
<0> """
<1> Quantize tensor A in blocks of size 4096 values.
<2>
<3> Quantizes tensor A by dividing it into blocks of 4096 values.
<4> Then the absolute maximum value within these blocks is calculated
<5> for the non-linear quantization.
<6>
<7> Parameters
<8> ----------
<9> A : torch.Tensor
<10> The input tensor.
<11> code : torch.Tensor
<12> The quantization map.
<13> absmax : torch.Tensor
<14> The absmax values.
<15> out : torch.Tensor
<16> The output tensor (8-bit).
<17>
<18> Returns
<19> -------
<20> torch.Tensor:
<21> The 8-bit tensor.
<22> tuple(torch.Tensor, torch.Tensor):
<23> The quantization state to undo the quantization.
<24> """
<25>
<26>
<27> if code is None:
<28> if "dynamic" not in name2qmap:
<29> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<30> code = name2qmap["dynamic"]
<31>
<32> if absmax is None:
<33> n = A.numel()
<34> blocks = n // blocksize
<35> blocks += 1 if n % blocksize > 0 else 0
<36> absmax = torch.zeros((blocks,), device=A.device)
<37>
<38> if out is None:
<39> out = torch.zeros_like(A, dtype=torch.uint8)
<40>
<41> if A.device.type != 'cpu':
<42> assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
<43> cblocksize = ct.c_int32(blocksize)
<44> prev_device = pre_call(A.device)
<45> code = code.to(A.device)
<46> is_on_gpu([code, A, out, absmax])
<47> if A.</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
# offset: 1
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
if nested:
offset = absmax.mean()
absmax -= offset
qabsmax, state2 = quantize_blockwise(absmax, blocksize=blocksize, nested=False)
state = [qabsmax, code, blocksize, nested, offset, state2]
else:
state = [absmax, code, blocksize, nested, None, None]
return out, state
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
+ if A.numel() == A.shape[-1] and A.requires_grad == False:
+ return F.cutlass3_gemm(A, B.t(), out, state=quant_state)
+ else:
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
- return MatMul4Bit.apply(A, B, out, bias, quant_state)
|
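Per the diff above, the blockwise quantization state now also records the original dtype ([absmax, code, blocksize, nested, dtype, offset, state2]), so dequantization can return the input's dtype. A round-trip sketch, assuming a CUDA build of bitsandbytes from this era; the state layout is an internal detail that may differ in other versions:

import torch
import bitsandbytes.functional as F

A = torch.randn(4096, device='cuda', dtype=torch.bfloat16)
qA, state = F.quantize_blockwise(A, blocksize=4096)    # 8-bit codes + quantization state
A_deq = F.dequantize_blockwise(qA, state)              # comes back in the dtype stored in the state
print(A_deq.dtype, (A.float() - A_deq.float()).abs().max())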
|
bitsandbytes.functional/dequantize_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
02fd80cb814285984415fd903278b8217c18c4df
|
Added bfloat16 quantizations and tests.
|
<31>:<add> if quant_state is None:
<add> quant_state = (absmax, code, blocksize, False, torch.float32, None, None)
<add>
<add> absmax, code, blocksize, nested, dtype, offset, state2 = quant_state
<add> if nested:
<add> absmax = dequantize_blockwise(absmax, state2)
<add> absmax += offset
<add>
<32>:<del> out = torch.zeros_like(A, dtype=torch.float32)
<33>:<del>
<34>:<del> if quant_state is None:
<35>:<del> quant_state = (absmax, code, blocksize)
<36>:<del> assert absmax is not None and out is not None
<37>:<del> else:
<38>:<del> absmax, code, blocksize, nested, offset, state2 = quant_state
<39>:<del> if nested:
<40>:<del> absmax = dequantize_blockwise(absmax, state2)
<41>:<del> absmax += offset
<42>:<del>
<43>:<add> out = torch.empty(A.shape, dtype=dtype, device=A.device)
|
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
<0> """
<1> Dequantizes blockwise quantized values.
<2>
<3> Dequantizes the tensor A with maximum absolute values absmax in
<4> blocks of size 4096.
<5>
<6> Parameters
<7> ----------
<8> A : torch.Tensor
<9> The input 8-bit tensor.
<10> quant_state : tuple(torch.Tensor, torch.Tensor)
<11> Tuple of code and absmax values.
<12> absmax : torch.Tensor
<13> The absmax values.
<14> code : torch.Tensor
<15> The quantization map.
<16> out : torch.Tensor
<17> Dequantized output tensor (default: float32)
<18>
<19>
<20> Returns
<21> -------
<22> torch.Tensor:
<23> Dequantized tensor (default: float32)
<24> """
<25> assert quant_state is not None or absmax is not None
<26> if code is None and quant_state is None:
<27> if "dynamic" not in name2qmap:
<28> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<29> code = name2qmap["dynamic"]
<30>
<31> if out is None:
<32> out = torch.zeros_like(A, dtype=torch.float32)
<33>
<34> if quant_state is None:
<35> quant_state = (absmax, code, blocksize)
<36> assert absmax is not None and out is not None
<37> else:
<38> absmax, code, blocksize, nested, offset, state2 = quant_state
<39> if nested:
<40> absmax = dequantize_blockwise(absmax, state2)
<41> absmax += offset
<42>
<43>
<44> if A.device.type != 'cpu':
<45> device = pre_call(A.device)
</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 1
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
===========changed ref 0===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
"""
Quantize tensor A in blocks of size 4096 values.
Quantizes tensor A by dividing it into blocks of 4096 values.
Then the absolute maximum value within these blocks is calculated
for the non-linear quantization.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
absmax : torch.Tensor
The absmax values.
out : torch.Tensor
The output tensor (8-bit).
Returns
-------
torch.Tensor:
The 8-bit tensor.
tuple(torch.Tensor, torch.Tensor):
The quantization state to undo the quantization.
"""
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if absmax is None:
n = A.numel()
blocks = n // blocksize
blocks += 1 if n % blocksize > 0 else 0
absmax = torch.zeros((blocks,), device=A.device)
if out is None:
out = torch.zeros_like(A, dtype=torch.uint8)
if A.device.type != 'cpu':
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
cblocksize = ct.c_int32(blocksize)
prev_device = pre_call(A.device)
code = code.to(A.device)
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblock</s>
===========changed ref 1===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
# offset: 1
<s>fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
+ elif A.dtype == torch.bfloat16:
+ lib.cquantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
if nested:
offset = absmax.mean()
absmax -= offset
qabsmax, state2 = quantize_blockwise(absmax, blocksize=blocksize, nested=False)
+ state = [qabsmax, code, blocksize, nested, A.dtype, offset, state2]
- state = [qabsmax, code, blocksize, nested, offset, state2]
else:
+ state = [absmax, code, blocksize, nested, A.dtype</s>
===========changed ref 2===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
# offset: 2
<s>, None]
- state = [absmax, code, blocksize, nested, None, None]
-
-
return out, state
===========changed ref 3===========
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
+ if A.numel() == A.shape[-1] and A.requires_grad == False:
+ return F.cutlass3_gemm(A, B.t(), out, state=quant_state)
+ else:
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
- return MatMul4Bit.apply(A, B, out, bias, quant_state)
|
bitsandbytes.functional/quantize_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
02fd80cb814285984415fd903278b8217c18c4df
|
Added bfloat16 quantizations and tests.
|
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
<0> """
<1> Quantize tensor A in blocks of 4-bit values.
<2>
<3> Quantizes tensor A by dividing it into blocks which are independently quantized to FP4.
<4>
<5> Parameters
<6> ----------
<7> A : torch.Tensor
<8> The input tensor.
<9> absmax : torch.Tensor
<10> The absmax values.
<11> out : torch.Tensor
<12> The output tensor (8-bit).
<13> blocksize : int
<14> The blocksize used in quantization.
<15> quant_type : str
<16> The 4-bit quantization data type {fp4, nf4}
<17>
<18> Returns
<19> -------
<20> torch.Tensor:
<21> The 8-bit tensor with packed 4-bit values.
<22> tuple(torch.Tensor, torch.Size, torch.dtype, int):
<23> The quantization state to undo the quantization.
<24> """
<25> if A.device.type != 'cuda':
<26> raise NotImplementedError(f'Device type not supported for FP4 quantization: {A.device.type}')
<27> if quant_type not in ['fp4', 'nf4']:
<28> raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
<29>
<30> n = A.numel()
<31> input_shape = A.shape
<32>
<33> if absmax is None:
<34> blocks = n // blocksize
<35> blocks += 1 if n % blocksize > 0 else 0
<36> absmax = torch.zeros((blocks,), device=A.device)
<37>
<38>
<39> if out is None:
<40> out = torch.zeros(((n+1)//2, 1), dtype=torch.uint8, device=A.device)
<41>
<42> assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
<43>
<44> prev_device = pre_</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
# offset: 1
is_on_gpu([A, out, absmax])
if A.dtype == torch.float32:
if quant_type == 'fp4':
lib.cquantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
elif A.dtype == torch.float16:
if quant_type == 'fp4':
lib.cquantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
if compress_statistics:
offset = absmax.mean()
absmax -= offset
#code = create_custom_map().to(absmax.device)
#qabsmax, state2 = quantize_blockwise(absmax, code=code, blocksize=256)
qabsmax, state2 = quantize_blockwise(absmax, blocksize=256)
del absmax
state</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
# offset: 2
<s>
qabsmax, state2 = quantize_blockwise(absmax, blocksize=256)
del absmax
state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]
else:
state = [absmax, input_shape, A.dtype, blocksize, None, quant_type]
return out, state
===========changed ref 0===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
"""
Dequantizes blockwise quantized values.
Dequantizes the tensor A with maximum absolute values absmax in
blocks of size 4096.
Parameters
----------
A : torch.Tensor
The input 8-bit tensor.
quant_state : tuple(torch.Tensor, torch.Tensor)
Tuple of code and absmax values.
absmax : torch.Tensor
The absmax values.
code : torch.Tensor
The quantization map.
out : torch.Tensor
Dequantized output tensor (default: float32)
Returns
-------
torch.Tensor:
Dequantized tensor (default: float32)
"""
assert quant_state is not None or absmax is not None
if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
+ if quant_state is None:
+ quant_state = (absmax, code, blocksize, False, torch.float32, None, None)
+
+ absmax, code, blocksize, nested, dtype, offset, state2 = quant_state
+ if nested:
+ absmax = dequantize_blockwise(absmax, state2)
+ absmax += offset
+
if out is None:
- out = torch.zeros_like(A, dtype=torch.float32)
-
- if quant_state is None:
- quant_state = (absmax, code, blocksize)
- assert absmax is not None and out is not None
- else:
- absmax, code, blocksize, nested, offset, state2 = quant_state
</s>
===========changed ref 1===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 1
<s> out is not None
- else:
- absmax, code, blocksize, nested, offset, state2 = quant_state
- if nested:
- absmax = dequantize_blockwise(absmax, state2)
- absmax += offset
-
+ out = torch.empty(A.shape, dtype=dtype, device=A.device)
if A.device.type != 'cpu':
device = pre_call(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
+ elif out.dtype == torch.bfloat16:
+ lib.cdequantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr</s>
===========changed ref 2===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 2
<s>max), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
|
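quantize_4bit above allocates (n+1)//2 uint8 values because two 4-bit codes share one byte. A nibble-packing sketch showing only the size arithmetic; the actual packing order used by the CUDA kernel is an internal detail, so the high/low-nibble convention here is an assumption for illustration:

import torch

codes = torch.randint(0, 16, (9,), dtype=torch.uint8)            # odd length on purpose
padded = torch.cat([codes, codes.new_zeros(codes.numel() % 2)])  # pad to an even count
packed = (padded[0::2] << 4) | padded[1::2]                      # (n+1)//2 bytes
assert packed.numel() == (codes.numel() + 1) // 2
high, low = packed >> 4, packed & 0xF                            # unpack both nibbles
unpacked = torch.stack([high, low], dim=1).flatten()[:codes.numel()]
assert torch.equal(unpacked, codes)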
|
bitsandbytes.functional/dequantize_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
02fd80cb814285984415fd903278b8217c18c4df
|
Added bfloat16 quantizations and tests.
|
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
<0> """
<1> Dequantizes FP4 blockwise quantized values.
<2>
<3> Dequantizes the tensor A with maximum absolute values absmax in blocks of size blocksize.
<4>
<5> Parameters
<6> ----------
<7> A : torch.Tensor
<8> The input 8-bit tensor (packed 4-bit values).
<9> quant_state : tuple(torch.Tensor, torch.Size, torch.dtype)
<10> Tuple of absmax values, original tensor shape and original dtype.
<11> absmax : torch.Tensor
<12> The absmax values.
<13> out : torch.Tensor
<14> Dequantized output tensor.
<15> blocksize : int
<16> The blocksize used in quantization.
<17> quant_type : str
<18> The 4-bit quantization data type {fp4, nf4}
<19>
<20>
<21> Returns
<22> -------
<23> torch.Tensor:
<24> Dequantized tensor.
<25> """
<26> if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
<27> raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
<28> if quant_type not in ['fp4', 'nf4']:
<29> raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
<30>
<31> if quant_state is None:
<32> assert absmax is not None and out is not None
<33> shape = out.shape
<34> dtype = out.dtype
<35> else:
<36> absmax, shape, dtype, blocksize, compressed_stats, quant_type = quant_state
<37>
<38>
<39> if compressed_stats is not None:
<40> offset, state2 = compressed_stats
<41> absmax = dequantize_blockwise(absmax, state2)
<42> abs</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
# offset: 1
if out is None:
out = torch.empty(shape, dtype=dtype, device=A.device)
n = out.numel()
device = pre_call(A.device)
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
if quant_type == 'fp4':
lib.cdequantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
elif out.dtype == torch.float16:
if quant_type == 'fp4':
lib.cdequantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
is_transposed = (True if A.shape[0] == 1 else False)
if is_transposed: return out.t()
</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
# offset: 2
<s> = (True if A.shape[0] == 1 else False)
if is_transposed: return out.t()
else: return out
===========changed ref 0===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
"""
Dequantizes blockwise quantized values.
Dequantizes the tensor A with maximum absolute values absmax in
blocks of size 4096.
Parameters
----------
A : torch.Tensor
The input 8-bit tensor.
quant_state : tuple(torch.Tensor, torch.Tensor)
Tuple of code and absmax values.
absmax : torch.Tensor
The absmax values.
code : torch.Tensor
The quantization map.
out : torch.Tensor
Dequantized output tensor (default: float32)
Returns
-------
torch.Tensor:
Dequantized tensor (default: float32)
"""
assert quant_state is not None or absmax is not None
if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
+ if quant_state is None:
+ quant_state = (absmax, code, blocksize, False, torch.float32, None, None)
+
+ absmax, code, blocksize, nested, dtype, offset, state2 = quant_state
+ if nested:
+ absmax = dequantize_blockwise(absmax, state2)
+ absmax += offset
+
if out is None:
- out = torch.zeros_like(A, dtype=torch.float32)
-
- if quant_state is None:
- quant_state = (absmax, code, blocksize)
- assert absmax is not None and out is not None
- else:
- absmax, code, blocksize, nested, offset, state2 = quant_state
</s>
===========changed ref 1===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 1
<s> out is not None
- else:
- absmax, code, blocksize, nested, offset, state2 = quant_state
- if nested:
- absmax = dequantize_blockwise(absmax, state2)
- absmax += offset
-
+ out = torch.empty(A.shape, dtype=dtype, device=A.device)
if A.device.type != 'cpu':
device = pre_call(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
+ elif out.dtype == torch.bfloat16:
+ lib.cdequantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr</s>
===========changed ref 2===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 2
<s>max), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
|
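An end-to-end NF4 sketch tying the records above together: quantize the weight, dequantize it as a reference, and compare against the fused 4-bit matmul. This assumes CUDA and a bitsandbytes build from this era; the contents of the returned quant_state are internal and version-dependent.

import math
import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

A = torch.randn(1, 4096, dtype=torch.float16, device='cuda')
B = torch.randn(4 * 4096, 4096, dtype=torch.float16, device='cuda') / math.sqrt(4096)

qB, state = F.quantize_nf4(B)                   # packed 4-bit weights + quantization state
B_deq = F.dequantize_nf4(qB, state)             # reference dequantization
C_ref = A @ B_deq.t()
C_4bit = bnb.matmul_4bit(A, qB.t(), state)      # single-row input takes the fused GEMV path
print((C_ref - C_4bit).abs().mean())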