path (string, 9-117 chars) | type (2 classes) | project (10 classes) | commit_hash (string, 40 chars) | commit_message (string, 1-137 chars) | ground_truth (string, 0-2.74k chars) | main_code (string, 102-3.37k chars) | context (string, 0-14.7k chars)
---|---|---|---|---|---|---|---|
bitsandbytes.functional/cutlass3_gemm | Modified | bitsandbytes-foundation~bitsandbytes | 02fd80cb814285984415fd903278b8217c18c4df | Added bfloat16 quantizations and tests. |
# module: bitsandbytes.functional
def cutlass3_gemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
<0> #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
<1> if state is None:
<2> Bshape = B.shape
<3> bout = Bshape[1]
<4> else:
<5> Bshape = state[1]
<6> bout = Bshape[0]
<7> if out is None:
<8> out = torch.zeros(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<9>
<10> sA = A.shape
<11> sB = B.shape
<12> if transposed_A and len(sA) == 2:
<13> sA = (sA[1], sA[0])
<14> elif transposed_A and len(sA) == 3:
<15> sA = (sA[0], sA[2], sA[0])
<16> if transposed_B and len(sB) == 2:
<17> sB = (sB[1], sB[0])
<18> elif transposed_B and len(sB) == 3:
<19> sB = (sB[0], sB[2], sB[0])
<20> # this is a mess: cuBLAS expect column major, but PyTorch is row major.
<21> # So to perform the matrix multiplication, we have to treat A, B, and C matrices
<22> # (transpose of row major is column major)
<23> # This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these
<24>
<25> # matrices in the input arguments for cuBLAS
<26> # column major: A @ B = C: [m, k] @ [k, n] = [m, n]
<27> # row major: B^T @ A^T = C^T: [</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def cutlass3_gemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 1
# column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m]
if len(sB) == 2:
if B.stride()[0] == B.shape[1]:
transposed_B = False
elif B.stride()[1] == B.shape[0]:
transposed_B = True
if len(A.shape) == 2:
if A.stride()[0] == A.shape[1]:
transposed_A = False
elif A.stride()[1] == A.shape[0]:
transposed_A = True
else:
if A.stride()[1] == A.shape[2]:
transposed_A = False
elif A.stride()[2] == A.shape[1]:
transposed_A = True
if len(sA) == 2:
n = sA[0]
ldb = A.stride()[1 if transposed_A else 0]
elif len(sA) == 3 and len(sB) == 2:
n = sA[0] * sA[1]
ldb = sA[2]
m = sB[1]
k = sB[0]
lda = B.stride()[0]
ldc = sB[1]
elif len(sB) == 3:
# special case
assert len(sA) == 3
if not (sA[0] == sB[0] and sA[1] == sB[1]):
raise ValueError(
f"Only bsi,bso->io supported for tensor contractions, but dims for A x B were: {sA} x {sB}"
)
trans</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def cutlass3_gemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 2
<s> supported for tensor contractions, but dims for A x B were: {sA} x {sB}"
)
transposed_A = True
transposed_B = False
m = sB[2]
n = sA[2]
k = sB[0] * sB[1]
lda = n
ldb = sA[2]
ldc = m
ptr = CUBLAS_Context.get_instance().get_context(A.device)
# B^T @ A^T = C^T
# [km, nk -> mn]
#lda = ldb = ldc = 1
#lda = 1
if state is not None:
m = Bshape[0]
k = Bshape[1]
lda = Bshape[0]
ldc = Bshape[0]
ldb = (ldb+1)//2
#print(m, n, k, lda, ldb, ldc)
is_on_gpu([B, A, out])
m = ct.c_int32(m)
n = ct.c_int32(n)
k = ct.c_int32(k)
lda = ct.c_int32(lda)
ldb = ct.c_int32(ldb)
ldc = ct.c_int32(ldc)
if B.dtype == torch.uint8:
lib.cgemm_4bit_inference_naive(m, n, k, get_ptr(A), get_ptr(B), get_ptr(state[0]), get_ptr(out),</s>
===========below chunk 2===========
# module: bitsandbytes.functional
def cutlass3_gemm(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 3
<s>, ldb, ldc, ct.c_int32(state[3]))
elif A.dtype == torch.float32:
lib.cgemm_host_fp32(m, n, k, get_ptr(A), get_ptr(B), get_ptr(out), lda, ldb, ldc)
elif A.dtype == torch.float16:
lib.cgemm_host_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(out), lda, ldb, ldc)
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
return out
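Editor's note: the layout comments in cutlass3_gemm (cuBLAS expects column-major storage while PyTorch is row-major, so the call effectively computes B^T A^T = C^T) can be checked with a small PyTorch sketch; this is an illustration, not part of the repository:

import torch

# Reinterpreting a row-major buffer as column-major is the same as transposing,
# so feeding B and A swapped to a column-major GEMM yields C^T, whose row-major
# reinterpretation is the desired C.
m, k, n = 3, 4, 5
A = torch.randn(m, k)
B = torch.randn(k, n)
C = A @ B
C_T = B.t() @ A.t()            # what the column-major kernel effectively computes
assert torch.allclose(C, C_T.t(), atol=1e-6)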
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
+ if A.numel() == A.shape[-1] and A.requires_grad == False:
+ return F.cutlass3_gemm(A, B.t(), out, state=quant_state)
+ else:
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
- return MatMul4Bit.apply(A, B, out, bias, quant_state)
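Editor's note: the new branch dispatches on A.numel() == A.shape[-1], which holds exactly when every leading dimension of A is 1, i.e. a single row/token at inference time. A quick illustration (the shapes are arbitrary):

import torch

A = torch.randn(1, 1, 4096)        # one token: numel == last dim -> 4-bit GEMM/GEMV fast path
assert A.numel() == A.shape[-1]

A = torch.randn(2, 16, 4096)       # a real batch -> falls back to MatMul4Bit.apply
assert A.numel() != A.shape[-1]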
===========changed ref 1===========
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
"""
Dequantizes FP4 blockwise quantized values.
Dequantizes the tensor A with maximum absolute values absmax in blocks of size blocksize.
Parameters
----------
A : torch.Tensor
The input 8-bit tensor (packed 4-bit values).
quant_state : tuple(torch.Tensor, torch.Size, torch.dtype)
Tuple of absmax values, original tensor shape and original dtype.
absmax : torch.Tensor
The absmax values.
out : torch.Tensor
Dequantized output tensor.
blocksize : int
The blocksize used in quantization.
quant_type : str
The 4-bit quantization data type {fp4, nf4}
Returns
-------
torch.Tensor:
Dequantized tensor.
"""
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
if quant_type not in ['fp4', 'nf4']:
raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
if quant_state is None:
assert absmax is not None and out is not None
shape = out.shape
dtype = out.dtype
else:
absmax, shape, dtype, blocksize, compressed_stats, quant_type = quant_state
if compressed_stats is not None:
offset, state2 = compressed_stats
absmax = dequantize_blockwise(absmax, state2)
absmax += offset
if out is None:
out = torch.empty(shape, dtype=dtype, device=A.device)
n = out.numel()
</s>
|
|
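Editor's note: as a usage sketch for the dequantize_4bit docstring shown in the changed ref above, a minimal round trip looks roughly like this (assumes a CUDA device and the bitsandbytes.functional API listed in this row):

import torch
import bitsandbytes.functional as F

A = torch.randn(1024, 1024, device="cuda", dtype=torch.float16)
qA, quant_state = F.quantize_nf4(A, blocksize=64)   # packed 4-bit values + (absmax, shape, dtype, ...)
A_deq = F.dequantize_nf4(qA, quant_state)           # lossy reconstruction in the original dtype/shape
print((A - A_deq).abs().float().mean())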
tests.test_functional/test_dynamic_blockwise_quantization | Modified | bitsandbytes-foundation~bitsandbytes | 02fd80cb814285984415fd903278b8217c18c4df | Added bfloat16 quantizations and tests. |
<4>:<add> A1 = torch.randn(1024, 1024, device="cuda", dtype=dtype)
<del> A1 = torch.randn(1024, 1024, device="cuda")
<7>:<add> diff = torch.abs(A1 - A2).float()
<del> diff = torch.abs(A1 - A2)
<8>:<add> reldiff = diff / torch.abs(A1.float() + 1e-8)
<del> reldiff = diff / torch.abs(A1 + 1e-8)
<13>:<add> #print('nested=', nested, 'randn', blocksize, 'dtype', dtype, sum(diffs)/len(diffs))
<add> #print('nested=', nested, 'randn', blocksize, 'dtype', dtype, sum(reldiffs)/len(reldiffs))
<15>:<del> #print('nested=', nested, 'randn', blocksize, sum(diffs)/len(diffs))
<16>:<del> #print('nested=', nested, 'randn', blocksize, sum(reldiffs)/len(reldiffs))
<17>:<add> assert A2.dtype == dtype
<20>:<add> A1 = torch.rand(1024, 1024, device="cuda", dtype=dtype)
<del> A1 = torch.rand(1024, 1024, device="cuda")
<23>:<add> diff = torch.abs(A1 - A2).float()
<del> diff = torch.abs(A1 - A2)
<24>:<add> reldiff = diff / torch.abs(A1.float() + 1e-8)
<del>
|
<s>torch.float32, torch.float16, torch.bfloat16], ids=["fp32", "fp16", "bf16"])
@pytest.mark.parametrize("nested", [False, True], ids=["False", "True"])
@pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
+ def test_dynamic_blockwise_quantization(dtype, nested, blocksize):
- def test_dynamic_blockwise_quantization(nested, blocksize):
<0> #print('')
<1> diffs = []
<2> reldiffs = []
<3> for i in range(100):
<4> A1 = torch.randn(1024, 1024, device="cuda")
<5> C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)
<6> A2 = F.dequantize_blockwise(C, S)
<7> diff = torch.abs(A1 - A2)
<8> reldiff = diff / torch.abs(A1 + 1e-8)
<9> diffs.append(diff.mean().item())
<10> reldiffs.append(reldiff.mean().item())
<11> abserr = sum(diffs)/len(diffs)
<12> relerr = sum(reldiffs)/len(reldiffs)
<13> assert abserr < 0.011
<14> assert relerr < 0.018
<15> #print('nested=', nested, 'randn', blocksize, sum(diffs)/len(diffs))
<16> #print('nested=', nested, 'randn', blocksize, sum(reldiffs)/len(reldiffs))
<17>
<18> diffs = []
<19> for i in range(100):
<20> A1 = torch.rand(1024, 1024, device="cuda")
<21> C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)
<22> A2 = F.dequantize_blockwise(C, S)
<23> diff = torch.abs(A1 - A2)
<24> </s>
|
===========below chunk 0===========
<s>, torch.float16, torch.bfloat16], ids=["fp32", "fp16", "bf16"])
@pytest.mark.parametrize("nested", [False, True], ids=["False", "True"])
@pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
+ def test_dynamic_blockwise_quantization(dtype, nested, blocksize):
- def test_dynamic_blockwise_quantization(nested, blocksize):
# offset: 1
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
#torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)
abserr = sum(diffs)/len(diffs)
relerr = sum(reldiffs)/len(reldiffs)
assert abserr < 0.0035
assert relerr < 0.015
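Editor's note: the blockwise-absmax idea this test exercises can be sketched in a few lines of plain PyTorch. This is a linear int8 toy, not the nonlinear dynamic codebook that quantize_blockwise actually uses:

import torch

def toy_blockwise_quant(A, blocksize=64):
    blocks = A.flatten().float().view(-1, blocksize)          # assumes numel % blocksize == 0
    absmax = blocks.abs().max(dim=1, keepdim=True).values     # one scale per block
    q = (blocks / absmax * 127).round().clamp(-127, 127).to(torch.int8)
    return q, absmax

def toy_blockwise_dequant(q, absmax, shape):
    return (q.float() / 127 * absmax).view(shape)

A = torch.randn(1024, 1024)
q, absmax = toy_blockwise_quant(A)
A2 = toy_blockwise_dequant(q, absmax, A.shape)
print((A - A2).abs().mean())    # small mean error, analogous to abserr in the test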
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.functional
quantize_blockwise(A: Tensor, code: Tensor=None, absmax: Tensor=None, out: Tensor=None, blocksize=4096, nested=False) -> Tensor
dequantize_blockwise(A: Tensor, quant_state: Tuple[Tensor, Tensor]=None, absmax: Tensor=None, code: Tensor=None, out: Tensor=None, blocksize: int=4096, nested=False) -> Tensor
at: torch._C
float32: dtype = ...
float16: dtype = ...
bfloat16: dtype = ...
at: torch._C._VariableFunctions
abs(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor
===========unchanged ref 1===========
rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
rand(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
rand(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=</s>
===========unchanged ref 2===========
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device:</s>
|
tests.test_functional/test_bench_matmul | Modified | bitsandbytes-foundation~bitsandbytes | 02fd80cb814285984415fd903278b8217c18c4df | Added bfloat16 quantizations and tests. |
<10>:<add> B_nf4, state_nf4 = F.quantize_nf4(B)
<del> B_nf4, state_nf4= F.quantize_nf4(B)
<23>:<add> F.cutlass3_gemm(A, B_nf4.t(), state=state_nf4)
|
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
<0> iters = 80
<1> formatB = F.get_special_format_str()
<2>
<3> A = torch.randn(batch, seq, model, device="cuda").half()
<4> B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
<5> torch.nn.init.xavier_uniform_(B)
<6>
<7> B_fp4, state = F.quantize_fp4(B)
<8> B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)
<9>
<10> B_nf4, state_nf4= F.quantize_nf4(B)
<11>
<12> linear8bit = bnb.nn.Linear8bitLt(model, hidden, False, False).cuda().half()
<13> linear8bit.eval()
<14>
<15> outliers = torch.randint(0, model, size=(5,)).cuda()
<16> A[:, :, outliers] = 8.0
<17>
<18> linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, False, threshold=6.0).cuda().half())
<19> #linearMixedBit.eval()
<20>
<21> linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
<22> linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
<23>
<24> # warmup
<25> for i in range(iters):
<26> torch.matmul(A, B.t())
<27> torch.cuda.synchronize()
<28> print("")
<29>
<30> torch.cuda.synchronize()
<31> t0 = time.time()
<32> for i in range(iters):
<33> torch.</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 1
torch.cuda.synchronize()
print( f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# bnb.matmul_4bit(A, B_fp4.t(), quant_state=state)
#torch.cuda.synchronize()
#print( f"bnb fp4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# bnb.matmul_4bit(A, B_fp4.t(), quant_state=state_c)
#torch.cuda.synchronize()
#print( f"bnb fp4 + compressed stats: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
torch.cuda.synchronize()
print( f"bnb nf4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s</s>
===========below chunk 1===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 2
<s>model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
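Editor's note: the benchmark repeats one pattern for every kernel it times: warm up, synchronize, time a fixed number of iterations, synchronize again. Distilled into a helper (a sketch, not part of the test file):

import time
import torch

def bench_cuda(fn, iters=80, warmup=10):
    for _ in range(warmup):
        fn()
    torch.cuda.synchronize()              # ensure queued kernels finished before timing starts
    t0 = time.time()
    for _ in range(iters):
        fn()
    torch.cuda.synchronize()              # wait for the timed kernels as well
    return time.time() - t0

# e.g. bench_cuda(lambda: torch.matmul(A, B.t()))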
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
parametrize: _ParametrizeMarkDecorator
at: bitsandbytes.functional
get_special_format_str()
quantize_fp4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
quantize_nf4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
cutlass3_gemm(A: Tensor, B: Tensor, out: Tensor=None, transposed_A=False, transposed_B=False, state=None)
at: bitsandbytes.nn.modules
Linear8bitLt(input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None)
at: tests.test_functional
values = []
at: time
time() -> float
at: torch._C
float16: dtype = ...
===========unchanged ref 1===========
at: torch._C._VariableFunctions
empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor
===========unchanged ref 2===========
randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(high: _int, size: _size, *, generator: Optional[Generator]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator]=</s>
|
tests.test_functional/test_fp4_quant | Modified | bitsandbytes-foundation~bitsandbytes | 02fd80cb814285984415fd903278b8217c18c4df | Added bfloat16 quantizations and tests. |
<21>:<add> A1 = torch.randn(1024, 1024, device='cuda', dtype=dtype)
<del> A1 = torch.randn(1024, 1024, device='cuda').half()
<30>:<add> assert A2.dtype == dtype
<del>
|
# module: tests.test_functional
#print((time.time()-t0)/1e6)
+ @pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16], ids=["fp32", "fp16", "bf16"])
+ def test_fp4_quant(dtype):
- def test_fp4_quant():
<0> vals = list(product([0, 1], repeat=4))
<1>
<2> code = {}
<3> for bits in vals:
<4> result = 0
<5> bias = 3
<6> sign, e1, e2, p1 = bits
<7> idx = sign*8 + e1*4 + e2*2 + p1*1
<8> sign = -1.0 if sign else 1.0
<9> exp = e1*2 + e2*1
<10> if exp == 0:
<11> # sub-normal
<12> if p1 == 0: result = 0
<13> else: result = sign*0.0625
<14> else:
<15> # normal
<16> exp = 2**(-exp + bias + 1)
<17> frac = 1.5 if p1 else 1.0
<18> result = sign*exp*frac
<19> code[idx] = result
<20>
<21> A1 = torch.randn(1024, 1024, device='cuda').half()
<22> qa, SA = F.quantize_fp4(A1, blocksize=64)
<23> A2 = F.dequantize_fp4(qa, SA)
<24>
<25> err = (A1 - A2).abs().float()
<26> relerr = (err/A1.abs().float()).mean()
<27> idx = err > 1.0
<28> err = err.mean()
<29>
<30>
<31> assert err.item() < 0.1
<32> assert relerr.item() < 0.28
<33>
|
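Editor's note: two worked examples of the sign/exponent/mantissa decoding that the loop in test_fp4_quant builds (editor's arithmetic, consistent with the fp4 table in get_4bit_type further below):

# bits (sign, e1, e2, p1) = (0, 1, 1, 1): idx = 0*8 + 1*4 + 1*2 + 1 = 7
#   exp = e1*2 + e2 = 3, scale = 2**(-3 + 3 + 1) = 2, frac = 1.5  ->  value = +3.0
# bits (sign, e1, e2, p1) = (1, 0, 1, 0): idx = 1*8 + 0*4 + 1*2 + 0 = 10
#   exp = 1, scale = 2**(-1 + 3 + 1) = 8, frac = 1.0              ->  value = -8.0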
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
parametrize: _ParametrizeMarkDecorator
at: bitsandbytes.functional
quantize_blockwise(A: Tensor, code: Tensor=None, absmax: Tensor=None, out: Tensor=None, blocksize=4096, nested=False) -> Tensor
quantize_fp4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor]=None, absmax: Tensor=None, out: Tensor=None, blocksize: int=64) -> Tensor
===========unchanged ref 1===========
at: itertools
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5], iter6: Iterable[_T6]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5, _T6]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3]) -> Iterator[Tuple[_T1, _T2, _T3]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4]) -> Iterator[Tuple[_T1, _T2, _T3, _T4]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2]) -> Iterator[Tuple[_T1, _T2]]
product(*iterables: Iterable[_T1], repeat: int) -> Iterator[Tuple[_T1, ...]]
product(iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]]
product(*iterables: Iterable[Any], repeat: int=...) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[Any], iter2: Iterable[Any], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], iter7: Iterable[Any], *iterables: Iterable[Any]) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5]]
at: tests.test_functional.test_bench_dequantization
a = torch.rand(1024, 1024, device='cuda').half()
at: time
time() -> float
===========unchanged ref 2===========
at: torch._C
float32: dtype = ...
float16: dtype = ...
bfloat16: dtype = ...
===========unchanged ref 3===========
at: torch._C._VariableFunctions
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional</s>
===========unchanged ref 4===========
at: torch.cuda
synchronize(device: _device_t=None) -> None
===========changed ref 0===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
"""
Quantize tensor A in blocks of size 4096 values.
Quantizes tensor A by dividing it into blocks of 4096 values.
Then the absolute maximum value within these blocks is calculated
for the non-linear quantization.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
absmax : torch.Tensor
The absmax values.
out : torch.Tensor
The output tensor (8-bit).
Returns
-------
torch.Tensor:
The 8-bit tensor.
tuple(torch.Tensor, torch.Tensor):
The quantization state to undo the quantization.
"""
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if absmax is None:
n = A.numel()
blocks = n // blocksize
blocks += 1 if n % blocksize > 0 else 0
absmax = torch.zeros((blocks,), device=A.device)
if out is None:
out = torch.zeros_like(A, dtype=torch.uint8)
if A.device.type != 'cpu':
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
cblocksize = ct.c_int32(blocksize)
prev_device = pre_call(A.device)
code = code.to(A.device)
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblock</s>
|
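Editor's note: a short usage sketch for the quantize_blockwise/dequantize_blockwise pair whose docstring appears in the changed ref just above (assumes a CUDA device and that, after this commit, the dequantized tensor comes back in the input dtype):

import torch
import bitsandbytes.functional as F

A = torch.randn(1024, 1024, device="cuda", dtype=torch.bfloat16)
C, S = F.quantize_blockwise(A, blocksize=256, nested=True)   # uint8 codes + quantization state
A2 = F.dequantize_blockwise(C, S)
print(A2.dtype, (A - A2).abs().float().mean())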
tests.test_functional/test_gemm_4bit | Modified | bitsandbytes-foundation~bitsandbytes | 02fd80cb814285984415fd903278b8217c18c4df | Added bfloat16 quantizations and tests. |
<1>:<add> for dim in [64, 128, 256, 512, 1024, 2048, 4096]:
<del> #for dim in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
<2>:<del> #for dim in [4096, 5120, 6656, 8192]:
<3>:<del> #for dim in [32]:
<4>:<del> for dim in [2*4096]:
<5>:<del> #for dim in [5120]:
<6>:<del> #for dim in [6656]:
<7>:<del> #for dim in [4]:
<12>:<add>
|
<s>torch.float32, torch.float16], ids=['fp32', 'fp16'])
+ @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
+ #@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
- @pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
<0> print('')
<1> #for dim in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
<2> #for dim in [4096, 5120, 6656, 8192]:
<3> #for dim in [32]:
<4> for dim in [2*4096]:
<5> #for dim in [5120]:
<6> #for dim in [6656]:
<7> #for dim in [4]:
<8> errs = []
<9> relerrs = []
<10> max_err = 0
<11> max_relerr = 0
<12> for i in range(100):
<13> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<14> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<15> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<16> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<17> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<18> B = torch.randn(4*dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<19> #B = torch.randn(1, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
<20>
<21> #print('')
<22> #print(A)
<23> #print(B.t())
<24> #A[:, :-1] = 0
<25> #B[:, :</s>
|
===========below chunk 0===========
<s>, torch.float16], ids=['fp32', 'fp16'])
+ @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
+ #@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
- @pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
# offset: 1
#A.flatten()[:-1] = 0
#B.flatten()[:-1] = 0
qB, state = F.quantize_nf4(B)
F.dequantize_nf4(qB, state)
#C3 = torch.matmul(A, B.t())
C2 = F.cutlass3_gemm(A, qB.t(), state=state)
C1 = bnb.matmul_4bit(A, qB.t(), state)
#print(state)
#print(qB)
#print('')
#print(A)
#print(B)
#print('='*89)
#print(C1)
#print(C2)
#print(C3)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2)
mag = torch.abs(C1)+1e-8
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
#print(err)
errs.append(err)
relerrs.append(relerr)
if err/torch.abs(C1).mean() ></s>
===========below chunk 1===========
<s>, torch.float16], ids=['fp32', 'fp16'])
+ @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
+ #@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
- @pytest.mark.parametrize("dtype", [torch.float16], ids=['fp16'])
def test_gemm_4bit(dtype):
# offset: 2
<s>err)
relerrs.append(relerr)
if err/torch.abs(C1).mean() > 5e-5 or err > 3.2e-5:
print('')
print(i, err, relerr)
#print(A.flatten()[-6:])
#print(B.flatten()[-6:])
#out = A.flatten()[-6:]*B.flatten()[-6:]
#print(out)
#print(out[:-1].sum())
print('='*80)
#print(C1.flatten()[-6:])
#print(C2.flatten()[-6:])
#assert False, 'ERROR'
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
print(c/math.sqrt(dim))
print('')
print(dim, sum(errs)/len(errs)/math.sqrt(dim))
print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
print(dim, (max_err.item(), max_relerr.item()))
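Editor's note, making the count-based tolerance above concrete:

# dim = 2*4096 = 8192, C1.numel() = 1 * (4*dim) = 32768
# c = int(32768 * 0.0014 * (8192/256)) + 1
#   = int(32768 * 0.0014 * 32) + 1 = int(1468.0064) + 1 = 1469   (~4.5% of entries)

So up to roughly 4.5% of the output entries may miss the 1e-5/0.01 tolerance when the two 4-bit paths are compared; with throw=False the check only reports rather than failing the test.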
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
parametrize: _ParametrizeMarkDecorator
at: bitsandbytes.functional
get_paged(*shape, dtype=torch.float32, device=torch.device('cuda', index=0))
quantize_nf4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor]=None, absmax: Tensor=None, out: Tensor=None, blocksize: int=64) -> Tensor
cutlass3_gemm(A: Tensor, B: Tensor, out: Tensor=None, transposed_A=False, transposed_B=False, state=None)
at: math
sqrt(x: SupportsFloat, /) -> float
at: tests.test_functional
assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True)
at: tests.test_functional.test_cutlass3_gemm
errs = []
relerrs = []
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
at: torch._C
float32: dtype = ...
float16: dtype = ...
bfloat16: dtype = ...
uint8: dtype = ...
at: torch._C._VariableFunctions
abs(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor
matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor
===========unchanged ref 1===========
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device:</s>
|
tests.test_functional/test_gemm_4bit | Modified | bitsandbytes-foundation~bitsandbytes | 7e49b5b9384042a5b4eec5a69abf45cfe0c3b8da | Added warp_shuffle indexing 185 vs 54. |
<1>:<add> #for dim in [64, 128, 256, 512, 1024, 2048, 4096]:
<del> for dim in [64, 128, 256, 512, 1024, 2048, 4096]:
<2>:<add> for dim in [4096]:
|
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
def test_gemm_4bit(dtype):
<0> print('')
<1> for dim in [64, 128, 256, 512, 1024, 2048, 4096]:
<2> errs = []
<3> relerrs = []
<4> max_err = 0
<5> max_relerr = 0
<6>
<7> for i in range(100):
<8> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<9> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<10> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<11> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<12> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<13> B = torch.randn(4*dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<14> #B = torch.randn(1, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
<15>
<16> #print('')
<17> #print(A)
<18> #print(B.t())
<19> #A[:, :-1] = 0
<20> #B[:, :-1] = 0
<21> #A.flatten()[:-1] = 0
<22> #B.flatten()[:-1] = 0
<23>
<24> qB, state = F.quantize_nf4(B)
<25> F.dequantize_nf4(qB, state)
<26>
<27> #C2 = b</s>
|
===========below chunk 0===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
def test_gemm_4bit(dtype):
# offset: 1
C2 = F.cutlass3_gemm(A, qB.t(), state=state)
C1 = torch.matmul(A, B.t())
#print(state)
#print(qB)
#print('')
#print(A)
#print(B)
#print('='*89)
#print(C1)
#print(C2)
#print(C3)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2).float()
mag = torch.abs(C1).float()+1e-5
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
#print(err)
errs.append(err)
relerrs.append(relerr)
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print('')
#print(dim, sum(errs)/len(errs)/math.sqrt(dim))
#print(dim, sum(relerr</s>
===========below chunk 1===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
def test_gemm_4bit(dtype):
# offset: 2
<s>(dim, sum(errs)/len(errs)/math.sqrt(dim))
#print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
#print(dim, (max_err.item(), max_relerr.item()))
#print(sum(errs)/len(errs)/math.sqrt(dim) , 0.00015)
#print(sum(relerrs)/len(relerrs)/math.sqrt(dim) , 0.0015)
assert sum(errs)/len(errs)/math.sqrt(dim) < 0.011
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.15
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.functional
quantize_nf4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor]=None, absmax: Tensor=None, out: Tensor=None, blocksize: int=64) -> Tensor
cutlass3_gemm(A: Tensor, B: Tensor, out: Tensor=None, transposed_A=False, transposed_B=False, state=None)
at: math
sqrt(x: SupportsFloat, /) -> float
at: tests.test_functional
assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True)
at: torch._C
float16: dtype = ...
bfloat16: dtype = ...
at: torch._C._VariableFunctions
abs(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor
matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor
===========unchanged ref 1===========
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device:</s>
|
tests.test_functional/test_gemm_4bit | Modified | bitsandbytes-foundation~bitsandbytes | eefbf60270497d0dd55b7abe18c519f0c75331f3 | Turning optimization (float accumulation). 185 vs 50. |
<2>:<add> for dim in [4*1024]:
<del> for dim in [4096]:
|
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
def test_gemm_4bit(dtype):
<0> print('')
<1> #for dim in [64, 128, 256, 512, 1024, 2048, 4096]:
<2> for dim in [4096]:
<3> errs = []
<4> relerrs = []
<5> max_err = 0
<6> max_relerr = 0
<7>
<8> for i in range(100):
<9> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<10> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<11> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<12> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<13> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<14> B = torch.randn(4*dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<15> #B = torch.randn(1, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
<16>
<17> #print('')
<18> #print(A)
<19> #print(B.t())
<20> #A[:, :-1] = 0
<21> #B[:, :-1] = 0
<22> #A.flatten()[:-1] = 0
<23> #B.flatten()[:-1] = 0
<24>
<25> qB, state = F.quantize_nf4(B)
<26> F.dequantize_nf4(qB,</s>
|
===========below chunk 0===========
# module: tests.test_functional
#@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
def test_gemm_4bit(dtype):
# offset: 1
#C2 = bnb.matmul_4bit(A, qB.t(), state)
C2 = F.cutlass3_gemm(A, qB.t(), state=state)
C1 = torch.matmul(A, B.t())
#print(state)
#print(qB)
#print('')
#print(A)
#print(B)
#print('='*89)
#print(C1)
#print(C2)
#print(C3)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2).float()
mag = torch.abs(C1).float()+1e-5
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
#print(err)
errs.append(err)
relerrs.append(relerr)
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.functional
quantize_nf4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor]=None, absmax: Tensor=None, out: Tensor=None, blocksize: int=64) -> Tensor
cutlass3_gemm(A: Tensor, B: Tensor, out: Tensor=None, transposed_A=False, transposed_B=False, state=None)
at: math
sqrt(x: SupportsFloat, /) -> float
at: tests.test_functional
assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True)
at: torch._C
float16: dtype = ...
bfloat16: dtype = ...
at: torch._C._VariableFunctions
abs(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor
matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor
===========unchanged ref 1===========
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device:</s>
|
bitsandbytes.autograd._functions/matmul_4bit | Modified | bitsandbytes-foundation~bitsandbytes | 4b88d69de76f4e876d71665f48392b4c12e48867 | Added abitrary data types; fixed a bug for small matrices. |
<2>:<add> return F.gemv_4bit(A, B.t(), out, state=quant_state)
<del> return F.cutlass3_gemm(A, B.t(), out, state=quant_state)
|
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
<0> assert quant_state is not None
<1> if A.numel() == A.shape[-1] and A.requires_grad == False:
<2> return F.cutlass3_gemm(A, B.t(), out, state=quant_state)
<3> else:
<4> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<5>
|
===========unchanged ref 0===========
at: bitsandbytes.autograd._functions
tensor = torch.Tensor
MatMul4Bit(*args, **kwargs)
at: torch._tensor.Tensor.__setstate__
self.requires_grad, _, self._backward_hooks = state
at: typing
List = _alias(list, 1, inst=False, name='List')
|
bitsandbytes.functional/create_normal_map | Modified | bitsandbytes-foundation~bitsandbytes | 4b88d69de76f4e876d71665f48392b4c12e48867 | Added abitrary data types; fixed a bug for small matrices. |
<5>:<del> v = v1 + v2 + v3
<10>:<add>
<add> v = v1 + v2 + v3
<del> v = v1 + v2 + v3
<15>:<add>
<16>:<add>
|
# module: bitsandbytes.functional
def create_normal_map(offset=0.9677083, use_extra_value=True):
<0> if use_extra_value:
<1> # one more positive value, this is an asymmetric type
<2> v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
<3> v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
<4> v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
<5> v = v1 + v2 + v3
<6> else:
<7> v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
<8> v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
<9> v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
<10> v = v1 + v2 + v3
<11>
<12> values = torch.Tensor(v)
<13> values = values.sort().values
<14> values /= values.max()
<15> assert values.numel() == 256
<16> return values
<17>
|
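Editor's note: the construction in create_normal_map can be reproduced in isolation to see the 16 distinct NF4 values that later appear hard-coded in get_4bit_type('nf4'). A sketch (requires scipy):

import torch
from scipy.stats import norm

offset = 0.9677083
v_pos = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()      # 8 positive quantiles
v_neg = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()   # 7 negative quantiles
values = torch.tensor(sorted(v_pos + [0.0] + v_neg))
values /= values.max()
print(values)   # approx. [-1.0, -0.6962, ..., 0.0, ..., 0.7230, 1.0]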
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
+ return F.gemv_4bit(A, B.t(), out, state=quant_state)
- return F.cutlass3_gemm(A, B.t(), out, state=quant_state)
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
|
bitsandbytes.functional/quantize_4bit | Modified | bitsandbytes-foundation~bitsandbytes | 4b88d69de76f4e876d71665f48392b4c12e48867 | Added abitrary data types; fixed a bug for small matrices. |
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
<0> """
<1> Quantize tensor A in blocks of 4-bit values.
<2>
<3> Quantizes tensor A by dividing it into blocks which are independently quantized to FP4.
<4>
<5> Parameters
<6> ----------
<7> A : torch.Tensor
<8> The input tensor.
<9> absmax : torch.Tensor
<10> The absmax values.
<11> out : torch.Tensor
<12> The output tensor (8-bit).
<13> blocksize : int
<14> The blocksize used in quantization.
<15> quant_type : str
<16> The 4-bit quantization data type {fp4, nf4}
<17>
<18> Returns
<19> -------
<20> torch.Tensor:
<21> The 8-bit tensor with packed 4-bit values.
<22> tuple(torch.Tensor, torch.Size, torch.dtype, int):
<23> The quantization state to undo the quantization.
<24> """
<25> if A.device.type != 'cuda':
<26> raise NotImplementedError(f'Device type not supported for FP4 quantization: {A.device.type}')
<27> if quant_type not in ['fp4', 'nf4']:
<28> raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
<29>
<30> n = A.numel()
<31> input_shape = A.shape
<32>
<33> if absmax is None:
<34> blocks = n // blocksize
<35> blocks += 1 if n % blocksize > 0 else 0
<36> absmax = torch.zeros((blocks,), device=A.device)
<37>
<38>
<39> if out is None:
<40> out = torch.zeros(((n+1)//2, 1), dtype=torch.uint8, device=A.device)
<41>
<42> assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
<43>
<44> prev_device = pre_</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
# offset: 1
is_on_gpu([A, out, absmax])
if A.dtype == torch.float32:
if quant_type == 'fp4':
lib.cquantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
elif A.dtype == torch.float16:
if quant_type == 'fp4':
lib.cquantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
elif A.dtype == torch.bfloat16:
if quant_type == 'fp4':
lib.cquantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
# offset: 2
<s>None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
if compress_statistics:
offset = absmax.mean()
absmax -= offset
#code = create_custom_map().to(absmax.device)
#qabsmax, state2 = quantize_blockwise(absmax, code=code, blocksize=256)
qabsmax, state2 = quantize_blockwise(absmax, blocksize=256)
del absmax
state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]
else:
state = [absmax, input_shape, A.dtype, blocksize, None, quant_type]
return out, state
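Editor's note: a minimal sketch of the compressed-statistics path shown above, where the per-block absmax values are mean-shifted and blockwise-quantized themselves, with the offset and secondary state carried inside the quant state (assumes a CUDA device):

import torch
import bitsandbytes.functional as F

W = torch.randn(4096, 4096, device="cuda", dtype=torch.bfloat16)
qW, state = F.quantize_4bit(W, blocksize=64, compress_statistics=True, quant_type='nf4')
# state = [qabsmax, input_shape, dtype, blocksize, [offset, state2], quant_type]
W2 = F.dequantize_4bit(qW, state)
print(W2.dtype, W2.shape)       # original dtype and shape restored from the quant state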
===========changed ref 0===========
# module: bitsandbytes.functional
+ def get_4bit_type(typename, device=None, blocksize=64):
+ if device is None: device = 'cuda'
+ data = None
+ if typename == 'nf4':
+ data = [-1.0, -0.6961928009986877, -0.5250730514526367, -0.39491748809814453, -0.28444138169288635,
+ -0.18477343022823334, -0.09105003625154495, 0.0, 0.07958029955625534, 0.16093020141124725,
+ 0.24611230194568634, 0.33791524171829224, 0.44070982933044434, 0.5626170039176941,
+ 0.7229568362236023, 1.0]
+ elif typename == 'fp4':
+ # 0b000 = 0
+ # 0b001 = 0.0625
+ # 0b010 = 8
+ # 0b011 = 12
+ # 0b100 = 4
+ # 0b101 = 6
+ # 0b110 = 2
+ # 0b111 = 3
+ data = [0, 0.0625, 8.0, 12.0, 4.0, 6.0, 2.0, 3.0, -0, -0.0625, -8.0, -12.0, -4.0, -6.0, -2.0, -3.0]
+ elif typename == 'int4':
+ data = [7, 6, 5, 4, 3, 2, 1, 0, -0, -1, -2, -3, -4, -5, -6, -7]
+ elif typename == 'af4':
+ # Taken from: NF4 Isn't Information Theoretically Optimal (and that's Good)
+ # https://arxiv</s>
===========changed ref 1===========
# module: bitsandbytes.functional
+ def get_4bit_type(typename, device=None, blocksize=64):
# offset: 1
<s> from: NF4 Isn't Information Theoretically Optimal (and that's Good)
+ # https://arxiv.org/abs/2306.06965
+ if blocksize == 64:
+ data = [-1., -0.69441008, -0.51243739, -0.3736951, -0.25607552, -0.14982478,
+ -0.04934812, 0., 0.04273164, 0.12934483, 0.21961274, 0.31675666,
+ 0.42563882, 0.55496234, 0.72424863, 1.][::-1]
+ else:
+ raise NotImplementedError(f'4-bit AbnormalFloats currently only support blocksize 64.')
+
+ if data is None:
+ raise NotImplementedError(f'Typename {typename} not supported')
+
+ data = Tensor(data)
+ data /= data.abs().max()
+ assert data.numel() == 16
+
+ return data.to(device)
+
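Editor's note: to illustrate how a 16-entry table like the ones returned by get_4bit_type is consumed, here is a toy nibble lookup in pure PyTorch. The packing order and the rescaling by the block absmax live in the CUDA kernels, so the byte layout used here is an assumption for illustration only:

import torch
import bitsandbytes.functional as F

code = F.get_4bit_type('nf4', device='cpu')                 # 16 values normalized to [-1, 1]
packed = torch.tensor([0x1F, 0x80], dtype=torch.uint8)      # 2 bytes = 4 four-bit indices
idx = torch.stack([packed >> 4, packed & 0x0F], dim=1).flatten().long()   # [1, 15, 8, 0]
print(code[idx])    # codebook values; a real kernel would still multiply by the block's absmax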
===========changed ref 2===========
# module: bitsandbytes.functional
def create_normal_map(offset=0.9677083, use_extra_value=True):
if use_extra_value:
# one more positive value, this is an asymmetric type
v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
- v = v1 + v2 + v3
else:
v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
+
+ v = v1 + v2 + v3
- v = v1 + v2 + v3
values = torch.Tensor(v)
values = values.sort().values
values /= values.max()
+
assert values.numel() == 256
+
return values
|
|
bitsandbytes.functional/dequantize_4bit | Modified | bitsandbytes-foundation~bitsandbytes | 4b88d69de76f4e876d71665f48392b4c12e48867 | Added abitrary data types; fixed a bug for small matrices. |
<36>:<add> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
<del> absmax, shape, dtype, blocksize, compressed_stats, quant_type = quant_state
|
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
<0> """
<1> Dequantizes FP4 blockwise quantized values.
<2>
<3> Dequantizes the tensor A with maximum absolute values absmax in blocks of size blocksize.
<4>
<5> Parameters
<6> ----------
<7> A : torch.Tensor
<8> The input 8-bit tensor (packed 4-bit values).
<9> quant_state : tuple(torch.Tensor, torch.Size, torch.dtype)
<10> Tuple of absmax values, original tensor shape and original dtype.
<11> absmax : torch.Tensor
<12> The absmax values.
<13> out : torch.Tensor
<14> Dequantized output tensor.
<15> blocksize : int
<16> The blocksize used in quantization.
<17> quant_type : str
<18> The 4-bit quantization data type {fp4, nf4}
<19>
<20>
<21> Returns
<22> -------
<23> torch.Tensor:
<24> Dequantized tensor.
<25> """
<26> if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
<27> raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
<28> if quant_type not in ['fp4', 'nf4']:
<29> raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
<30>
<31> if quant_state is None:
<32> assert absmax is not None and out is not None
<33> shape = out.shape
<34> dtype = out.dtype
<35> else:
<36> absmax, shape, dtype, blocksize, compressed_stats, quant_type = quant_state
<37>
<38>
<39> if compressed_stats is not None:
<40> offset, state2 = compressed_stats
<41> absmax = dequantize_blockwise(absmax, state2)
<42> abs</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
# offset: 1
if out is None:
out = torch.empty(shape, dtype=dtype, device=A.device)
n = out.numel()
device = pre_call(A.device)
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
if quant_type == 'fp4':
lib.cdequantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
elif out.dtype == torch.float16:
if quant_type == 'fp4':
lib.cdequantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
elif out.dtype == torch.bfloat16:
if quant_type == 'fp4':
lib.cdequantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
# offset: 2
<s>_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
is_transposed = (True if A.shape[0] == 1 else False)
if is_transposed: return out.t()
else: return out
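
A minimal round-trip sketch of the quantize/dequantize pair exercised above. The call forms follow the signatures shown in this record (F.quantize_4bit returning a packed tensor plus a quant_state, F.dequantize_4bit consuming that state); treat the exact argument names as assumptions tied to this commit.

import torch
import bitsandbytes.functional as F

W = torch.randn(1024, 1024, dtype=torch.float16, device='cuda')
qW, state = F.quantize_4bit(W, quant_type='nf4')   # packed uint8, two 4-bit codes per byte
W_hat = F.dequantize_4bit(qW, state)               # reconstructed fp16 tensor with the original shape
print((W - W_hat).abs().mean())                    # small, since each block is rescaled by its own absmax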
===========changed ref 0===========
# module: bitsandbytes.functional
+ def get_4bit_type(typename, device=None, blocksize=64):
+ if device is None: device = 'cuda'
+ data = None
+ if typename == 'nf4':
+ data = [-1.0, -0.6961928009986877, -0.5250730514526367, -0.39491748809814453, -0.28444138169288635,
+ -0.18477343022823334, -0.09105003625154495, 0.0, 0.07958029955625534, 0.16093020141124725,
+ 0.24611230194568634, 0.33791524171829224, 0.44070982933044434, 0.5626170039176941,
+ 0.7229568362236023, 1.0]
+ elif typename == 'fp4':
+ # 0b000 = 0
+ # 0b001 = 0.0625
+ # 0b010 = 8
+ # 0b011 = 12
+ # 0b100 = 4
+ # 0b101 = 6
+ # 0b110 = 2
+ # 0b111 = 3
+ data = [0, 0.0625, 8.0, 12.0, 4.0, 6.0, 2.0, 3.0, -0, -0.0625, -8.0, -12.0, -4.0, -6.0, -2.0, -3.0]
+ elif typename == 'int4':
+ data = [7, 6, 5, 4, 3, 2, 1, 0, -0, -1, -2, -3, -4, -5, -6, -7]
+ elif typename == 'af4':
+ # Taken from: NF4 Isn't Information Theoretically Optimal (and that's Good)
+ # https://arxiv</s>
===========changed ref 1===========
# module: bitsandbytes.functional
+ def get_4bit_type(typename, device=None, blocksize=64):
# offset: 1
<s> from: NF4 Isn't Information Theoretically Optimal (and that's Good)
+ # https://arxiv.org/abs/2306.06965
+ if blocksize == 64:
+ data = [-1., -0.69441008, -0.51243739, -0.3736951, -0.25607552, -0.14982478,
+ -0.04934812, 0., 0.04273164, 0.12934483, 0.21961274, 0.31675666,
+ 0.42563882, 0.55496234, 0.72424863, 1.][::-1]
+ else:
+ raise NotImplementedError(f'4-bit AbnormalFloats currently only support blocksize 64.')
+
+ if data is None:
+ raise NotImplementedError(f'Typename {typename} not supported')
+
+ data = Tensor(data)
+ data /= data.abs().max()
+ assert data.numel() == 16
+
+ return data.to(device)
+
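To make the role of this 16-entry table concrete, here is a pure-PyTorch sketch (not the CUDA kernel) of expanding packed 4-bit codes through such a codebook; the nibble order and the block/absmax layout are illustrative assumptions, not the kernel's exact layout.

import torch

def dequant_4bit_reference(packed, codebook, absmax, blocksize=64):
    # packed: uint8 tensor with two 4-bit codes per byte; codebook: the 16 values above, scaled to [-1, 1]
    hi = (packed >> 4) & 0xF                                   # first code of each byte (assumed order)
    lo = packed & 0xF                                          # second code of each byte
    codes = torch.stack([hi, lo], dim=-1).flatten().long()
    vals = codebook[codes]                                     # table lookup
    return (vals.view(-1, blocksize) * absmax.view(-1, 1)).flatten()  # rescale each block by its absmax
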
===========changed ref 2===========
# module: bitsandbytes.functional
def create_normal_map(offset=0.9677083, use_extra_value=True):
if use_extra_value:
# one more positive value, this is an asymmetric type
v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
- v = v1 + v2 + v3
else:
v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
+
+ v = v1 + v2 + v3
- v = v1 + v2 + v3
values = torch.Tensor(v)
values = values.sort().values
values /= values.max()
+
assert values.numel() == 256
+
return values
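
The same 256-entry codebook can be reproduced outside the library with scipy; a sketch assuming the default offset and the asymmetric use_extra_value=True branch above.

import torch
from scipy.stats import norm

offset = 0.9677083
pos = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()       # 8 positive quantiles of the standard normal
neg = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()    # 7 mirrored negative quantiles
code = torch.tensor(pos + [0.0] * (256 - 15) + neg).sort().values  # pad with zeros to a 256-entry code
code /= code.max()                                                 # normalize into [-1, 1]
assert code.numel() == 256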
|
tests.test_functional/test_bench_matmul
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4b88d69de76f4e876d71665f48392b4c12e48867
|
Added arbitrary data types; fixed a bug for small matrices.
|
<23>:<add> F.gemv_4bit(A, B_nf4.t(), state=state_nf4)
<del> F.cutlass3_gemm(A, B_nf4.t(), state=state_nf4)
|
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
<0> iters = 80
<1> formatB = F.get_special_format_str()
<2>
<3> A = torch.randn(batch, seq, model, device="cuda").half()
<4> B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
<5> torch.nn.init.xavier_uniform_(B)
<6>
<7> B_fp4, state = F.quantize_fp4(B)
<8> B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)
<9>
<10> B_nf4, state_nf4 = F.quantize_nf4(B)
<11>
<12> linear8bit = bnb.nn.Linear8bitLt(model, hidden, False, False).cuda().half()
<13> linear8bit.eval()
<14>
<15> outliers = torch.randint(0, model, size=(5,)).cuda()
<16> A[:, :, outliers] = 8.0
<17>
<18> linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, False, threshold=6.0).cuda().half())
<19> #linearMixedBit.eval()
<20>
<21> linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
<22> linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
<23> F.cutlass3_gemm(A, B_nf4.t(), state=state_nf4)
<24>
<25> # warmup
<26> for i in range(iters):
<27> torch.matmul(A, B.t())
<28> torch.cuda.synchronize()
<29> print("")
<30>
<31> torch.cuda</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 1
t0 = time.time()
for i in range(iters):
torch.matmul(A, B.t())
torch.cuda.synchronize()
print( f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# bnb.matmul_4bit(A, B_fp4.t(), quant_state=state)
#torch.cuda.synchronize()
#print( f"bnb fp4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# bnb.matmul_4bit(A, B_fp4.t(), quant_state=state_c)
#torch.cuda.synchronize()
#print( f"bnb fp4 + compressed stats: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
#bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
F.cutlass3_gemm(A, B_nf4.t(), state=state_nf4)
</s>
===========below chunk 1===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 2
<s> F.cutlass3_gemm(A, B_nf4.t(), state=state_nf4)
torch.cuda.synchronize()
print( f"bnb nf4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.functional
get_special_format_str()
quantize_fp4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
quantize_nf4(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False)
at: bitsandbytes.nn.modules
Linear8bitLt(input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None)
at: tests.test_functional
values = []
names = ["batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values]
at: time
time() -> float
at: torch._C
float16: dtype = ...
===========unchanged ref 1===========
at: torch._C._VariableFunctions
empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor
===========unchanged ref 2===========
randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randint(high: _int, size: _size, *, generator: Optional[Generator]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator]=</s>
|
bitsandbytes.autograd._functions/MatMul4Bit.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
94168d79d74174ee4ba7c183e2cfc7dacc89c939
|
Added FP4 fast inference support.
|
<16>:<add> output = torch.nn.functional.linear(A, F.dequantize_4bit(B, state).to(A.dtype).t(), bias)
<del> output = torch.nn.functional.linear(A, F.dequantize_fp4(B, state).to(A.dtype).t(), bias)
|
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=None):
<0> # default of pytorch behavior if inputs are empty
<1> ctx.is_empty = False
<2> if prod(A.shape) == 0:
<3> ctx.is_empty = True
<4> ctx.A = A
<5> ctx.B = B
<6> ctx.bias = bias
<7> B_shape = state[1]
<8> if A.shape[-1] == B_shape[0]:
<9> return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
<10> else:
<11> return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)
<12>
<13>
<14> # 1. Dequantize
<15> # 2. MatmulnN
<16> output = torch.nn.functional.linear(A, F.dequantize_fp4(B, state).to(A.dtype).t(), bias)
<17>
<18> # 3. Save state
<19> ctx.state = state
<20> ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
<21>
<22> if any(ctx.needs_input_grad[:2]):
<23> ctx.tensors = (A, B)
<24> else:
<25> ctx.tensors = (None, None)
<26>
<27> return output
<28>
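The two commented steps above amount to "dequantize, then an ordinary linear"; a condensed sketch of that contract, reusing exactly the call written in this diff.

import torch
import bitsandbytes.functional as F

def forward_4bit_reference(A, B, state, bias=None):
    # B holds the packed 4-bit weight, state carries absmax/shape/dtype/blocksize
    W = F.dequantize_4bit(B, state).to(A.dtype)          # 1. dequantize
    return torch.nn.functional.linear(A, W.t(), bias)    # 2. matmul, same transpose as in the diff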
| |
bitsandbytes.autograd._functions/MatMul4Bit.backward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
94168d79d74174ee4ba7c183e2cfc7dacc89c939
|
Added FP4 fast inference support.
|
<16>:<add> if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())
<del> if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_fp4(B, ctx.state).to(grad_output.dtype).t())
|
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
<0> if ctx.is_empty:
<1> bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
<2> return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
<3>
<4> req_gradA, _, _, req_gradBias, _= ctx.needs_input_grad
<5> A, B = ctx.tensors
<6> state = ctx.state
<7>
<8> grad_A, grad_B, grad_bias = None, None, None
<9>
<10> if req_gradBias:
<11> # compute grad_bias first before changing grad_output dtype
<12> grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
<13>
<14> # not supported by PyTorch. TODO: create work-around
<15> #if req_gradB: grad_B = torch.matmul(grad_output.t(), A)
<16> if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_fp4(B, ctx.state).to(grad_output.dtype).t())
<17>
<18> return grad_A, grad_B, None, grad_bias, None
<19>
|
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=None):
# default of pytorch behavior if inputs are empty
ctx.is_empty = False
if prod(A.shape) == 0:
ctx.is_empty = True
ctx.A = A
ctx.B = B
ctx.bias = bias
B_shape = state[1]
if A.shape[-1] == B_shape[0]:
return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
else:
return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)
# 1. Dequantize
# 2. MatmulnN
+ output = torch.nn.functional.linear(A, F.dequantize_4bit(B, state).to(A.dtype).t(), bias)
- output = torch.nn.functional.linear(A, F.dequantize_fp4(B, state).to(A.dtype).t(), bias)
# 3. Save state
ctx.state = state
ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
if any(ctx.needs_input_grad[:2]):
ctx.tensors = (A, B)
else:
ctx.tensors = (None, None)
return output
|
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
94168d79d74174ee4ba7c183e2cfc7dacc89c939
|
Added FP4 fast inference support.
|
<1>:<add> for dim in [128, 256, 512, 1024, 2048, 4096]:
<del> for dim in [64, 128, 256, 512, 1024, 2048, 4096]:
<9>:<add> for i in range(1):
<del> for i in range(100):
<26>:<add> qB, state = F.quantize_4bit(B, quant_type=storage_type)
<del> qB, state = F.quantize_nf4
|
<s>test.mark.parametrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
+ def test_gemv_4bit(dtype, storage_type):
- def test_gemv_4bit(dtype):
<0> print('')
<1> for dim in [64, 128, 256, 512, 1024, 2048, 4096]:
<2> #for dim in [4*1024]:
<3> #for dim in [1*16]:
<4> errs = []
<5> relerrs = []
<6> max_err = 0
<7> max_relerr = 0
<8>
<9> for i in range(100):
<10> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<11> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<12> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<13> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<14> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<15> B = torch.randn(4*dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<16> #B = torch.randn(1, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
<17>
<18> #print('')
<19> #print(A)
<20> #print(B.t())
<21> #A[:, :-1] = 0
<22> #B[:, :-1] = 0
<23> #A.flatten()[:-1] = 0
<24> #B.flatten()[:-1] = 0
<25>
<26> qB, state = F.quantize_nf4</s>
|
===========below chunk 0===========
<s>etrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
+ def test_gemv_4bit(dtype, storage_type):
- def test_gemv_4bit(dtype):
# offset: 1
F.dequantize_nf4(qB, state)
C2 = F.gemv_4bit(A, qB.t(), state=state)
C3 = torch.matmul(A, B.t())
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
#print(state)
#print(qB)
#print('')
#print(A)
#print(B)
#print('='*89)
#print(C3.flatten()[-20:])
#print(C3)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2).float()
mag = torch.abs(C1).float()+1e-5
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
#print(err)
errs.append(err)
relerrs.append(relerr)
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01,</s>
===========below chunk 1===========
<s>etrize("dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
+ def test_gemv_4bit(dtype, storage_type):
- def test_gemv_4bit(dtype):
# offset: 2
<s>+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print('')
#print(dim, sum(errs)/len(errs)/math.sqrt(dim))
#print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
#print(dim, (max_err.item(), max_relerr.item()))
print(C1.flatten()[-20:])
print(C2.flatten()[-20:])
print(sum(errs)/len(errs)/math.sqrt(dim) , 0.00015)
print(sum(relerrs)/len(relerrs)/math.sqrt(dim) , 0.0015)
if dtype == torch.float16:
assert sum(errs)/len(errs)/math.sqrt(dim) < 5e-5
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.0005
else:
assert sum(errs)/len(errs)/math.sqrt(dim) < 3e-4
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.003
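
The accept/reject logic of this test reduces to mean absolute and mean relative error against a reference, both scaled by 1/sqrt(dim); a compact sketch of that metric with the fp16 thresholds from the asserts above.

import math
import torch

def error_stats(C_ref, C_test, dim):
    err = (C_ref - C_test).abs().float()
    relerr = err / (C_ref.abs().float() + 1e-5)    # epsilon guards against near-zero outputs
    return err.mean().item() / math.sqrt(dim), relerr.mean().item() / math.sqrt(dim)

# fp16 pass criteria above: scaled mean err < 5e-5 and scaled mean relerr < 5e-4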
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
if ctx.is_empty:
bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
req_gradA, _, _, req_gradBias, _= ctx.needs_input_grad
A, B = ctx.tensors
state = ctx.state
grad_A, grad_B, grad_bias = None, None, None
if req_gradBias:
# compute grad_bias first before changing grad_output dtype
grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
# not supported by PyTorch. TODO: create work-around
#if req_gradB: grad_B = torch.matmul(grad_output.t(), A)
+ if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())
- if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_fp4(B, ctx.state).to(grad_output.dtype).t())
return grad_A, grad_B, None, grad_bias, None
===========changed ref 1===========
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=None):
# default of pytorch behavior if inputs are empty
ctx.is_empty = False
if prod(A.shape) == 0:
ctx.is_empty = True
ctx.A = A
ctx.B = B
ctx.bias = bias
B_shape = state[1]
if A.shape[-1] == B_shape[0]:
return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
else:
return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)
# 1. Dequantize
# 2. MatmulnN
+ output = torch.nn.functional.linear(A, F.dequantize_4bit(B, state).to(A.dtype).t(), bias)
- output = torch.nn.functional.linear(A, F.dequantize_fp4(B, state).to(A.dtype).t(), bias)
# 3. Save state
ctx.state = state
ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
if any(ctx.needs_input_grad[:2]):
ctx.tensors = (A, B)
else:
ctx.tensors = (None, None)
return output
|
bitsandbytes.functional/gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
0f0390acb2a6307c6a92bbef2ff095bd7cbcdc90
|
Added double quantization support and tests.
|
<0>:<add> prev_device = pre_call(A.device)
<2>:<del> Bshape = B.shape
<3>:<del> bout = Bshape[1]
<4>:<del> else:
<5>:<add> raise ValueError(f'state cannot None. gem_4bit( ) requires the state from quantize_4bit( )')
<add>
<add> Bshape = state[1]
<del> Bshape = state[1]
<6>:<add> bout = Bshape[0]
<del> bout = Bshape[0]
<7>:<add> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = state
<add> if compressed_stats is not None:
<add> offset, state2 = compressed_stats
<add> absmax = dequantize_blockwise(absmax, state2)
<add> absmax += offset
<add>
<9>:<add>
<add>
<add>
|
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
<0> #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
<1> if state is None:
<2> Bshape = B.shape
<3> bout = Bshape[1]
<4> else:
<5> Bshape = state[1]
<6> bout = Bshape[0]
<7> if out is None:
<8> out = torch.zeros(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<9>
<10> sA = A.shape
<11> sB = B.shape
<12> if transposed_A and len(sA) == 2:
<13> sA = (sA[1], sA[0])
<14> elif transposed_A and len(sA) == 3:
<15> sA = (sA[0], sA[2], sA[0])
<16> if transposed_B and len(sB) == 2:
<17> sB = (sB[1], sB[0])
<18> elif transposed_B and len(sB) == 3:
<19> sB = (sB[0], sB[2], sB[0])
<20> # this is a mess: cuBLAS expect column major, but PyTorch is row major.
<21> # So to perform the matrix multiplication, we have to treat A, B, and C matrices
<22> # (transpose of row major is column major)
<23> # This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these
<24>
<25> # matrices in the input arguments for cuBLAS
<26> # column major: A @ B = C: [m, k] @ [k, n] = [m, n]
<27> # row major: B^T @ A^T = C^T: [m</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 1
# column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m]
if len(sB) == 2:
if B.stride()[0] == B.shape[1]:
transposed_B = False
elif B.stride()[1] == B.shape[0]:
transposed_B = True
if len(A.shape) == 2:
if A.stride()[0] == A.shape[1]:
transposed_A = False
elif A.stride()[1] == A.shape[0]:
transposed_A = True
else:
if A.stride()[1] == A.shape[2]:
transposed_A = False
elif A.stride()[2] == A.shape[1]:
transposed_A = True
if len(sA) == 2:
n = sA[0]
ldb = A.stride()[1 if transposed_A else 0]
elif len(sA) == 3 and len(sB) == 2:
n = sA[0] * sA[1]
ldb = sA[2]
m = sB[1]
k = sB[0]
lda = B.stride()[0]
ldc = sB[1]
elif len(sB) == 3:
# special case
assert len(sA) == 3
if not (sA[0] == sB[0] and sA[1] == sB[1]):
raise ValueError(
f"Only bsi,bso->io supported for tensor contractions, but dims for A x B were: {sA} x {sB}"
)
transposed</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 2
<s> for tensor contractions, but dims for A x B were: {sA} x {sB}"
)
transposed_A = True
transposed_B = False
m = sB[2]
n = sA[2]
k = sB[0] * sB[1]
lda = n
ldb = sA[2]
ldc = m
# B^T @ A^T = C^T
# [km, nk -> mn]
#lda = ldb = ldc = 1
#lda = 1
if state is not None:
m = Bshape[0]
k = Bshape[1]
lda = Bshape[0]
ldc = Bshape[0]
ldb = (ldb+1)//2
#print(m, n, k, lda, ldb, ldc)
is_on_gpu([B, A, out])
m = ct.c_int32(m)
n = ct.c_int32(n)
k = ct.c_int32(k)
lda = ct.c_int32(lda)
ldb = ct.c_int32(ldb)
ldc = ct.c_int32(ldc)
if B.dtype == torch.uint8:
if A.dtype == torch.float16:
lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(state[0]), get_ptr(state[-1]), get_ptr(out), lda</s>
===========below chunk 2===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 3
<s>db, ldc, ct.c_int32(state[3]))
elif A.dtype == torch.bfloat16:
lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(state[0]), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
return out
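
Double quantization here means the per-block absmax statistics are themselves blockwise 8-bit quantized around their mean. The decompression path is exactly the three lines added in this diff; the compression side below is only an illustrative assumption about how compress_statistics=True produces the (offset, state2) pair.

import torch
import bitsandbytes.functional as F

absmax = torch.rand(4096, device='cuda') * 3          # stand-in for per-block absmax values

# assumed compression step: center on the mean, then 8-bit blockwise quantize the residual
offset = absmax.mean()
qabsmax, state2 = F.quantize_blockwise(absmax - offset)

# decompression step as written in gemv_4bit above
absmax_hat = F.dequantize_blockwise(qabsmax, state2)
absmax_hat += offset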
|
tests.test_functional/test_bench_matmul
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
0f0390acb2a6307c6a92bbef2ff095bd7cbcdc90
|
Added double quantization support and tests.
|
<11>:<add> B_nf4_c, state_nf4_c = F.quantize_nf4(B, compress_statistics=True)
<23>:<add> bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
<del> F.gemv_4bit(A, B_nf4.t(), state=state_nf4)
|
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
<0> iters = 80
<1> formatB = F.get_special_format_str()
<2>
<3> A = torch.randn(batch, seq, model, device="cuda").half()
<4> B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
<5> torch.nn.init.xavier_uniform_(B)
<6>
<7> B_fp4, state = F.quantize_fp4(B)
<8> B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)
<9>
<10> B_nf4, state_nf4 = F.quantize_nf4(B)
<11>
<12> linear8bit = bnb.nn.Linear8bitLt(model, hidden, False, False).cuda().half()
<13> linear8bit.eval()
<14>
<15> outliers = torch.randint(0, model, size=(5,)).cuda()
<16> A[:, :, outliers] = 8.0
<17>
<18> linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, False, threshold=6.0).cuda().half())
<19> #linearMixedBit.eval()
<20>
<21> linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
<22> linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
<23> F.gemv_4bit(A, B_nf4.t(), state=state_nf4)
<24>
<25> # warmup
<26> for i in range(iters):
<27> torch.matmul(A, B.t())
<28> torch.cuda.synchronize()
<29> print("")
<30>
<31> torch.cuda.</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 1
t0 = time.time()
for i in range(iters):
torch.matmul(A, B.t())
torch.cuda.synchronize()
print( f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# bnb.matmul_4bit(A, B_fp4.t(), quant_state=state)
#torch.cuda.synchronize()
#print( f"bnb fp4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# bnb.matmul_4bit(A, B_fp4.t(), quant_state=state_c)
#torch.cuda.synchronize()
#print( f"bnb fp4 + compressed stats: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
#bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
F.gemv_4bit(A, B_nf4.t(), state=state_nf4)
torch</s>
===========below chunk 1===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 2
<s> F.gemv_4bit(A, B_nf4.t(), state=state_nf4)
torch.cuda.synchronize()
print( f"bnb nf4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
===========changed ref 0===========
# module: tests.test_functional
+ batch_size = 5
- batch_size = 1
seqdim = 1
values = []
#values.append((batch_size, seqdim, 768, 4 * 768))
#values.append((batch_size, seqdim, 1024, 4*1024))
#values.append((batch_size, seqdim, 1536, 4*1536))
#values.append((batch_size, seqdim, 2048, 4*2048))
#values.append((batch_size, seqdim, 2560, 4*2560))
#values.append((batch_size, seqdim, 4096, 4*4096))
#values.append((batch_size, seqdim, 5120, 4*5120))
+ values.append((batch_size, seqdim, 6656, 4*6656))
- #values.append((batch_size, seqdim, 6656, 4*6656))
+ #values.append((batch_size, seqdim, 8192, 4*8192))
- values.append((batch_size, seqdim, 8192, 4*8192))
#values.append((batch_size, seqdim, 5140, 4*5140))
#values.append((batch_size, seqdim, 12288, 4*12288))
names = ["batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values]
===========changed ref 1===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
+ prev_device = pre_call(A.device)
#sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
if state is None:
- Bshape = B.shape
- bout = Bshape[1]
- else:
+ raise ValueError(f'state cannot None. gem_4bit( ) requires the state from quantize_4bit( )')
+
+ Bshape = state[1]
- Bshape = state[1]
+ bout = Bshape[0]
- bout = Bshape[0]
+ absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = state
+ if compressed_stats is not None:
+ offset, state2 = compressed_stats
+ absmax = dequantize_blockwise(absmax, state2)
+ absmax += offset
+
if out is None:
out = torch.zeros(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
+
+
+
sA = A.shape
sB = B.shape
if transposed_A and len(sA) == 2:
sA = (sA[1], sA[0])
elif transposed_A and len(sA) == 3:
sA = (sA[0], sA[2], sA[0])
if transposed_B and len(sB) == 2:
sB = (sB[1], sB[0])
elif transposed_B and len(sB) == 3:
sB = (sB[0], sB[2], sB[0])
# this is a mess: cuBLAS expect column major, but PyTorch is row</s>
===========changed ref 2===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 1
<s>[2], sB[0])
# this is a mess: cuBLAS expect column major, but PyTorch is row major.
# So to perform the matrix multiplication, we have to treat A, B, and C matrices
# (transpose of row major is column major)
# This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these
# matrices in the input arguments for cuBLAS
# column major: A @ B = C: [m, k] @ [k, n] = [m, n]
# row major: B^T @ A^T = C^T: [m, k] @ [k, n] = [m, n]
# column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m]
if len(sB) == 2:
if B.stride()[0] == B.shape[1]:
transposed_B = False
elif B.stride()[1] == B.shape[0]:
transposed_B = True
if len(A.shape) == 2:
if A.stride()[0] == A.shape[1]:
transposed_A = False
elif A.stride()[1] == A.shape[0]:
transposed_A = True
else:
if A.stride()[1] == A.shape[2]:
transposed_A = False
elif A.stride()[2] == A.shape[1]:
transposed_A = True
if len(sA) == 2:
n = sA[0]
</s>
|
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
0f0390acb2a6307c6a92bbef2ff095bd7cbcdc90
|
Added double quantization support and tests.
|
<9>:<add> for i in range(100):
<del> for i in range(1):
<26>:<add> qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=double_quant)
<del> qB, state = F.quantize_4bit(B,
|
<s>dtype", [torch.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
+ def test_gemv_4bit(dtype, storage_type, double_quant):
- def test_gemv_4bit(dtype, storage_type):
<0> print('')
<1> for dim in [128, 256, 512, 1024, 2048, 4096]:
<2> #for dim in [4*1024]:
<3> #for dim in [1*16]:
<4> errs = []
<5> relerrs = []
<6> max_err = 0
<7> max_relerr = 0
<8>
<9> for i in range(1):
<10> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<11> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<12> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<13> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<14> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<15> B = torch.randn(4*dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<16> #B = torch.randn(1, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
<17>
<18> #print('')
<19> #print(A)
<20> #print(B.t())
<21> #A[:, :-1] = 0
<22> #B[:, :-1] = 0
<23> #A.flatten()[:-1] = 0
<24> #B.flatten()[:-1] = 0
<25>
<26> qB, state = F.quantize_4bit(B,</s>
|
===========below chunk 0===========
<s>.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
+ def test_gemv_4bit(dtype, storage_type, double_quant):
- def test_gemv_4bit(dtype, storage_type):
# offset: 1
F.dequantize_4bit(qB, state)
C2 = F.gemv_4bit(A, qB.t(), state=state)
C3 = torch.matmul(A, B.t())
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
#print(state)
#print(qB)
#print('')
#print(A)
#print(B)
#print('='*89)
#print(C3)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2).float()
mag = torch.abs(C1).float()+1e-5
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
#print(err)
errs.append(err)
relerrs.append(relerr)
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print('')
</s>
===========below chunk 1===========
<s>.float32, torch.float16], ids=['fp32', 'fp16'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=['fp16', 'bf16'])
#@pytest.mark.parametrize("dtype", [torch.bfloat16], ids=['bf16'])
+ def test_gemv_4bit(dtype, storage_type, double_quant):
- def test_gemv_4bit(dtype, storage_type):
# offset: 2
<s>C1, C2, 1e-5, 0.01, count=c, throw=False)
#print('')
#print(dim, sum(errs)/len(errs)/math.sqrt(dim))
#print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
#print(dim, (max_err.item(), max_relerr.item()))
print(C1.flatten()[-20:])
print(C2.flatten()[-20:])
print(C3.flatten()[-20:])
print(sum(errs)/len(errs)/math.sqrt(dim) , dim)
print(sum(relerrs)/len(relerrs)/math.sqrt(dim) , dim)
if dtype == torch.float16:
assert sum(errs)/len(errs)/math.sqrt(dim) < 5e-5
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.0005
else:
assert sum(errs)/len(errs)/math.sqrt(dim) < 3e-4
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.003
===========changed ref 0===========
# module: tests.test_functional
+ batch_size = 5
- batch_size = 1
seqdim = 1
values = []
#values.append((batch_size, seqdim, 768, 4 * 768))
#values.append((batch_size, seqdim, 1024, 4*1024))
#values.append((batch_size, seqdim, 1536, 4*1536))
#values.append((batch_size, seqdim, 2048, 4*2048))
#values.append((batch_size, seqdim, 2560, 4*2560))
#values.append((batch_size, seqdim, 4096, 4*4096))
#values.append((batch_size, seqdim, 5120, 4*5120))
+ values.append((batch_size, seqdim, 6656, 4*6656))
- #values.append((batch_size, seqdim, 6656, 4*6656))
+ #values.append((batch_size, seqdim, 8192, 4*8192))
- values.append((batch_size, seqdim, 8192, 4*8192))
#values.append((batch_size, seqdim, 5140, 4*5140))
#values.append((batch_size, seqdim, 12288, 4*12288))
names = ["batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values]
===========changed ref 1===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
iters = 80
formatB = F.get_special_format_str()
A = torch.randn(batch, seq, model, device="cuda").half()
B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
torch.nn.init.xavier_uniform_(B)
B_fp4, state = F.quantize_fp4(B)
B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)
B_nf4, state_nf4 = F.quantize_nf4(B)
+ B_nf4_c, state_nf4_c = F.quantize_nf4(B, compress_statistics=True)
linear8bit = bnb.nn.Linear8bitLt(model, hidden, False, False).cuda().half()
linear8bit.eval()
outliers = torch.randint(0, model, size=(5,)).cuda()
A[:, :, outliers] = 8.0
linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, False, threshold=6.0).cuda().half())
#linearMixedBit.eval()
linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
+ bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
- F.gemv_4bit(A, B_nf4.t(), state=state_nf4)
# warmup
for i in range(iters):</s>
|
bitsandbytes.functional/gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6a905be5ced93c46e35b675fbdc73d40bb95d3ee
|
Fixed a bug where gemv_4bit would return a wrongly sized tensor.
|
<14>:<add> if len(A.shape) == 3:
<add> out = torch.zeros(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
<add> else:
<add> out = torch.zeros(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<del> out = torch.zeros(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
|
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
<0> prev_device = pre_call(A.device)
<1> #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
<2> if state is None:
<3> raise ValueError(f'state cannot None. gem_4bit( ) requires the state from quantize_4bit( )')
<4>
<5> Bshape = state[1]
<6> bout = Bshape[0]
<7> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = state
<8> if compressed_stats is not None:
<9> offset, state2 = compressed_stats
<10> absmax = dequantize_blockwise(absmax, state2)
<11> absmax += offset
<12>
<13> if out is None:
<14> out = torch.zeros(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<15>
<16>
<17>
<18>
<19> sA = A.shape
<20> sB = B.shape
<21> if transposed_A and len(sA) == 2:
<22> sA = (sA[1], sA[0])
<23> elif transposed_A and len(sA) == 3:
<24> sA = (sA[0], sA[2], sA[0])
<25> if transposed_B and len(sB) == 2:
<26> sB = (sB[1], sB[0])
<27> elif transposed_B and len(sB) == 3:
<28> sB = (sB[0], sB[2], sB[0])
<29> # this is a mess: cuBLAS expect column major, but PyTorch is row major.
<30> # So to perform the matrix multiplication, we have to treat A, B, and C matrices
<31> #</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 1
# This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these
# matrices in the input arguments for cuBLAS
# column major: A @ B = C: [m, k] @ [k, n] = [m, n]
# row major: B^T @ A^T = C^T: [m, k] @ [k, n] = [m, n]
# column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m]
if len(sB) == 2:
if B.stride()[0] == B.shape[1]:
transposed_B = False
elif B.stride()[1] == B.shape[0]:
transposed_B = True
if len(A.shape) == 2:
if A.stride()[0] == A.shape[1]:
transposed_A = False
elif A.stride()[1] == A.shape[0]:
transposed_A = True
else:
if A.stride()[1] == A.shape[2]:
transposed_A = False
elif A.stride()[2] == A.shape[1]:
transposed_A = True
if len(sA) == 2:
n = sA[0]
ldb = A.stride()[1 if transposed_A else 0]
elif len(sA) == 3 and len(sB) == 2:
n = sA[0] * sA[1]
ldb = sA[2]
m = sB[1]
k = sB[0]
lda = B.stride()[0]
ldc = sB[</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 2
<s>]
k = sB[0]
lda = B.stride()[0]
ldc = sB[1]
elif len(sB) == 3:
# special case
assert len(sA) == 3
if not (sA[0] == sB[0] and sA[1] == sB[1]):
raise ValueError(
f"Only bsi,bso->io supported for tensor contractions, but dims for A x B were: {sA} x {sB}"
)
transposed_A = True
transposed_B = False
m = sB[2]
n = sA[2]
k = sB[0] * sB[1]
lda = n
ldb = sA[2]
ldc = m
# B^T @ A^T = C^T
# [km, nk -> mn]
#lda = ldb = ldc = 1
#lda = 1
if state is not None:
m = Bshape[0]
k = Bshape[1]
lda = Bshape[0]
ldc = Bshape[0]
ldb = (ldb+1)//2
#print(m, n, k, lda, ldb, ldc)
is_on_gpu([B, A, out])
m = ct.c_int32(m)
n = ct.c_int32(n)
k = ct.c_int32(k)
lda = ct.c_int32(lda)
ldb = ct.c_int32(ldb</s>
===========below chunk 2===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 3
<s> ldc = ct.c_int32(ldc)
if B.dtype == torch.uint8:
if A.dtype == torch.float16:
lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
elif A.dtype == torch.bfloat16:
lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
post_call(prev_device)
return out
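
The long comment block about cuBLAS expecting column-major storage rests on the identity (A @ B)^T = B^T @ A^T: a row-major C occupies the same bytes as a column-major C^T, so the wrapper hands the operands to the kernel swapped and transposed. A tiny numerical check of that identity:

import torch

A = torch.randn(3, 5)
B = torch.randn(5, 2)
C = A @ B                          # [3, 2], the product we actually want
Ct = B.t() @ A.t()                 # [2, 3], what a column-major BLAS is asked to compute
assert torch.allclose(C, Ct.t())   # identical values, dimensions swapped back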
|
bitsandbytes.nn.modules/Params4bit.to
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
cef519c89ed04fdd6f3c09a672f8520532a89994
|
Added test for Param4bit.to() and fixed double quant behavior.
|
<18>:<add> s[-3][0] = s[-3][0].to(device) # offset
<del> s[-2][0] = s[-2][0].to(device) # offset
<19>:<add> s[-3][1][0] = s[-3][1][0].to(device) # nested quantiation state statitics
<del> s[-2][1][0] = s[-2][1][0].to(device) # nested quantiation state statitics
<20>:<add> s[-3][1][1] = s[-3][1][1].to(device) # nested quantiation codebook
<del> s[-2][1][1] = s[-2][1][1].to(device) # nested quantiation codebook
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
<0> device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
<1>
<2> if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
<3> return self.cuda(device)
<4> else:
<5> s = self.quant_state
<6> if s is not None:
<7> # make sure the quantization state is on the right device
<8> s[0] = s[0].to(device)
<9> if self.compress_statistics:
<10> # TODO: refactor this. This is a nightmare
<11> # for 4-bit:
<12> # state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]
<13> # state2 = [absmax, input_shape, A.dtype, blocksize, None, quant_type]
<14> #s[-2][0] = s[-2][0].to(device) # offset
<15> #s[-2][1][0] = s[-2][1][0].to(device) # nested absmax
<16>
<17> # for 8-bit
<18> s[-2][0] = s[-2][0].to(device) # offset
<19> s[-2][1][0] = s[-2][1][0].to(device) # nested quantiation state statitics
<20> s[-2][1][1] = s[-2][1][1].to(device) # nested quantiation codebook
<21> new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
<22> requires_grad=self.requires_grad, quant_state=self.quant_state,
<23> blocksize=self.blocksize, compress_statistics=self.compress_statistics,
<24> quant_type=self.quant_type)
<25>
<26> return new_param
<27>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.modules
Params4bit(data: Tensor=..., requires_grad: builtins.bool=...)
at: bitsandbytes.nn.modules.Params4bit
cuda(device)
at: bitsandbytes.nn.modules.Params4bit.cuda
self.data = w_4bit
self.quant_state = quant_state
at: torch._tensor.Tensor.__setstate__
self.requires_grad, _, self._backward_hooks = state
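
The hard-coded s[-3][...] indexing in Params4bit.to moves every tensor buried in the nested quant_state list (absmax, offset, the nested statistics and codebook) to the target device. A generic sketch that walks the structure instead of indexing positions; this is an illustration, not the code in this commit.

import torch

def move_quant_state(state, device):
    # state mixes tensors, shapes, dtypes, strings and nested child lists
    if isinstance(state, torch.Tensor):
        return state.to(device)
    if isinstance(state, list):
        return [move_quant_state(s, device) for s in state]
    return state                     # shapes, dtypes, blocksize ints and quant_type strings stay as-is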
|
tests.test_modules/test_kbit_backprop
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
cef519c89ed04fdd6f3c09a672f8520532a89994
|
Added test for Param4bit.to() and fixed double quant behavior.
|
<15>:<add> kbit = kbit.half().to('cuda')
|
# module: tests.test_modules
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize("module", modules, ids=names)
def test_kbit_backprop(module):
<0> b = 17
<1> dim1 = 37
<2> dim2 = 83
<3>
<4> ref = nn.Sequential(*[torch.nn.Linear(dim1, dim2), torch.nn.Linear(dim2, 10)])
<5> ref[1].weight.requires_grad = False
<6> torch.nn.init.kaiming_normal_(ref[0].weight)
<7> torch.nn.init.kaiming_normal_(ref[1].weight)
<8> kbit = nn.Sequential(*[torch.nn.Linear(dim1, dim2), module(dim2, 10)])
<9> kbit[0].weight.detach().copy_(ref[0].weight)
<10> kbit[1].weight.detach().copy_(ref[1].weight)
<11> kbit[0].bias.detach().copy_(ref[0].bias)
<12> kbit[1].bias.detach().copy_(ref[1].bias)
<13> ref = ref.half().cuda()
<14> kbit = kbit.half().cuda()
<15>
<16> errs1 = []
<17> errs2 = []
<18> relerrs1 = []
<19> relerrs2 = []
<20> for i in range(100):
<21> batch = torch.randn(b, dim1).half().cuda()
<22> out1 = ref(batch)
<23> out2 = kbit(batch)
<24> out1.mean().backward()
<25> out2.mean().backward()
<26>
<27> grad1 = ref[0].weight.grad
<28> grad2 = kbit[0].weight.grad
<29> bgrad1 = ref[0].bias.grad
<30> bgrad2 = kbit[0].bias.grad
<31>
<32> err1 = (out1-out2).abs().float()
</s>
|
===========below chunk 0===========
# module: tests.test_modules
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize("module", modules, ids=names)
def test_kbit_backprop(module):
# offset: 1
relerr1 = (err1/(out1.abs().float()+1e-9))
relerr2 = (err2/(grad1.abs().float()+1e-9))
errs1.append(err1.mean().item())
errs2.append(err2.mean().item())
relerrs1.append(relerr1.mean().item())
relerrs2.append(relerr2.mean().item())
if isinstance(module, bnb.nn.Linear8bitLt):
torch.testing.assert_close(grad1, grad2, atol=0.008, rtol=0.05)
torch.testing.assert_close(bgrad1, bgrad2, atol=0.008, rtol=0.05)
else:
torch.testing.assert_close(grad1, grad2, atol=0.015, rtol=0.05)
torch.testing.assert_close(bgrad1, bgrad2, atol=0.02, rtol=0.05)
ref.zero_grad()
kbit.zero_grad()
assert kbit[0].weight.grad is None or kbit[0].weight.grad.sum().item() == 0
assert kbit[0].weight.grad is None or kbit[0].bias.grad.sum().item() == 0
print('out', sum(errs1)/len(errs1))
print('grad', sum(errs2)/len(errs2))
print('rel out', sum(relerrs1)/len(relerrs1))
print('rel grad', sum(relerrs2)/len(relerrs2))
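
The structure of this test is: run the same batch through an fp16 reference stack and the k-bit stack, backprop through both, and require the first-layer weight and bias gradients to match within loose tolerances. A condensed sketch of that pattern, using the tolerances from the non-8-bit branch above.

import torch

def check_kbit_grads(ref, kbit, batch):
    ref(batch).mean().backward()
    kbit(batch).mean().backward()
    torch.testing.assert_close(ref[0].weight.grad, kbit[0].weight.grad, atol=0.015, rtol=0.05)
    torch.testing.assert_close(ref[0].bias.grad, kbit[0].bias.grad, atol=0.02, rtol=0.05)
    ref.zero_grad(); kbit.zero_grad()   # so repeated calls do not accumulate gradients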
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
return self.cuda(device)
else:
s = self.quant_state
if s is not None:
# make sure the quantization state is on the right device
s[0] = s[0].to(device)
if self.compress_statistics:
# TODO: refactor this. This is a nightmare
# for 4-bit:
# state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]
# state2 = [absmax, input_shape, A.dtype, blocksize, None, quant_type]
#s[-2][0] = s[-2][0].to(device) # offset
#s[-2][1][0] = s[-2][1][0].to(device) # nested absmax
# for 8-bit
+ s[-3][0] = s[-3][0].to(device) # offset
- s[-2][0] = s[-2][0].to(device) # offset
+ s[-3][1][0] = s[-3][1][0].to(device) # nested quantiation state statitics
- s[-2][1][0] = s[-2][1][0].to(device) # nested quantiation state statitics
+ s[-3][1][1] = s[-3][1][1].to(device) # nested quantiation codebook
- s[-2][1][1] = s[-2][1][1].to(device) # nested quantiation codebook
new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
requires_grad=self.</s>
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
# offset: 1
<s>bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
requires_grad=self.requires_grad, quant_state=self.quant_state,
blocksize=self.blocksize, compress_statistics=self.compress_statistics,
quant_type=self.quant_type)
return new_param
|
bitsandbytes.functional/gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5fab6734424a78a2a4594525386cd84feb67fb50
|
Added fp32 compute type for gemv_4bit.
|
<4>:<add>
<add> if A.numel() != A.shape[-1]:
<add> raise ValueError(f'Dimensions of A are invalid. Must be a vector with the leading dimensions of "1", e.g. [1, 1, 2048]')
<15>:<add> out = torch.empty(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
<del> out = torch.zeros(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
<17>:<add> out = torch.empty(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<del> out = torch.zeros(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<19>:<del>
<20>:<del>
<21>:<del>
<22>:<del> sA = A.shape
<23>:<del> sB = B.shape
<24>:<del> if transposed_A and len(sA) == 2:
<25>:<del> sA = (sA[1], sA[0])
<26>:<del> elif transposed_A and len(sA) == 3:
<27>:<del> sA = (sA[0], sA[2], sA[0])
<28>:<del> if transposed_B and len(sB) == 2:
<29>:<del> sB = (sB[1], sB[0])
<30>:<del> elif transposed_B and len(sB) == 3:
<31>:<del> sB = (sB[0], sB[2], s
|
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
<0> prev_device = pre_call(A.device)
<1> #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
<2> if state is None:
<3> raise ValueError(f'state cannot None. gem_4bit( ) requires the state from quantize_4bit( )')
<4>
<5> Bshape = state[1]
<6> bout = Bshape[0]
<7> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = state
<8> if compressed_stats is not None:
<9> offset, state2 = compressed_stats
<10> absmax = dequantize_blockwise(absmax, state2)
<11> absmax += offset
<12>
<13> if out is None:
<14> if len(A.shape) == 3:
<15> out = torch.zeros(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
<16> else:
<17> out = torch.zeros(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<18>
<19>
<20>
<21>
<22> sA = A.shape
<23> sB = B.shape
<24> if transposed_A and len(sA) == 2:
<25> sA = (sA[1], sA[0])
<26> elif transposed_A and len(sA) == 3:
<27> sA = (sA[0], sA[2], sA[0])
<28> if transposed_B and len(sB) == 2:
<29> sB = (sB[1], sB[0])
<30> elif transposed_B and len(sB) == 3:
<31> sB = (sB[0], sB[2], s</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 1
# this is a mess: cuBLAS expect column major, but PyTorch is row major.
# So to perform the matrix multiplication, we have to treat A, B, and C matrices
# (transpose of row major is column major)
# This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these
# matrices in the input arguments for cuBLAS
# column major: A @ B = C: [m, k] @ [k, n] = [m, n]
# row major: B^T @ A^T = C^T: [m, k] @ [k, n] = [m, n]
# column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m]
if len(sB) == 2:
if B.stride()[0] == B.shape[1]:
transposed_B = False
elif B.stride()[1] == B.shape[0]:
transposed_B = True
if len(A.shape) == 2:
if A.stride()[0] == A.shape[1]:
transposed_A = False
elif A.stride()[1] == A.shape[0]:
transposed_A = True
else:
if A.stride()[1] == A.shape[2]:
transposed_A = False
elif A.stride()[2] == A.shape[1]:
transposed_A = True
if len(sA) == 2:
n = sA[0]
ldb = A.stride()[1 if transposed_A else 0]
elif len(sA) == 3 and len(sB) == 2:
n = sA</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 2
<s>_A else 0]
elif len(sA) == 3 and len(sB) == 2:
n = sA[0] * sA[1]
ldb = sA[2]
m = sB[1]
k = sB[0]
lda = B.stride()[0]
ldc = sB[1]
elif len(sB) == 3:
# special case
assert len(sA) == 3
if not (sA[0] == sB[0] and sA[1] == sB[1]):
raise ValueError(
f"Only bsi,bso->io supported for tensor contractions, but dims for A x B were: {sA} x {sB}"
)
transposed_A = True
transposed_B = False
m = sB[2]
n = sA[2]
k = sB[0] * sB[1]
lda = n
ldb = sA[2]
ldc = m
# B^T @ A^T = C^T
# [km, nk -> mn]
#lda = ldb = ldc = 1
#lda = 1
if state is not None:
m = Bshape[0]
k = Bshape[1]
lda = Bshape[0]
ldc = Bshape[0]
ldb = (ldb+1)//2
#print(m, n, k, lda, ldb, ldc)
is_on_gpu([B, A, out])
m = ct.c_int32</s>
===========below chunk 2===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 3
<s>)
n = ct.c_int32(n)
k = ct.c_int32(k)
lda = ct.c_int32(lda)
ldb = ct.c_int32(ldb)
ldc = ct.c_int32(ldc)
if B.dtype == torch.uint8:
if A.dtype == torch.float16:
lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
elif A.dtype == torch.bfloat16:
lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
post_call(prev_device)
return out
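
As the new shape check at the top of this version requires, gemv_4bit only accepts an A that is effectively a single row (one token at inference time). A hedged usage sketch assembled from the calls that appear in the tests in this document:

import torch
import bitsandbytes.functional as F

W = torch.randn(4096, 1024, dtype=torch.float16, device='cuda')
qW, state = F.quantize_4bit(W, quant_type='nf4')

x = torch.randn(1, 1, 1024, dtype=torch.float16, device='cuda')   # x.numel() == x.shape[-1], as the check requires
y = F.gemv_4bit(x, qW.t(), state=state)                            # -> [1, 1, 4096]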
|
tests.test_functional/test_bench_matmul
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5fab6734424a78a2a4594525386cd84feb67fb50
|
Added fp32 compute type for gemv_4bit.
|
<0>:<add> iters = 1000
<del> iters = 80
|
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
<0> iters = 80
<1> formatB = F.get_special_format_str()
<2>
<3> A = torch.randn(batch, seq, model, device="cuda").half()
<4> B = torch.empty(hidden, model, dtype=torch.float16, device="cuda")
<5> torch.nn.init.xavier_uniform_(B)
<6>
<7> B_fp4, state = F.quantize_fp4(B)
<8> B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)
<9>
<10> B_nf4, state_nf4 = F.quantize_nf4(B)
<11> B_nf4_c, state_nf4_c = F.quantize_nf4(B, compress_statistics=True)
<12>
<13> linear8bit = bnb.nn.Linear8bitLt(model, hidden, False, False).cuda().half()
<14> linear8bit.eval()
<15>
<16> outliers = torch.randint(0, model, size=(5,)).cuda()
<17> A[:, :, outliers] = 8.0
<18>
<19> linearMixedBit = (bnb.nn.Linear8bitLt(model, hidden, False, False, threshold=6.0).cuda().half())
<20> #linearMixedBit.eval()
<21>
<22> linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()
<23> linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()
<24> bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
<25>
<26> # warmup
<27> for i in range(iters):
<28> </s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 1
torch.cuda.synchronize()
print("")
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
torch.matmul(A, B.t())
torch.cuda.synchronize()
print( f"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# bnb.matmul_4bit(A, B_fp4.t(), quant_state=state)
#torch.cuda.synchronize()
#print( f"bnb fp4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# bnb.matmul_4bit(A, B_fp4.t(), quant_state=state_c)
#torch.cuda.synchronize()
#print( f"bnb fp4 + compressed stats: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
torch.</s>
===========below chunk 1===========
# module: tests.test_functional
@pytest.mark.parametrize("batch, seq, model, hidden", values, ids=names)
def test_bench_matmul(batch, seq, model, hidden):
# offset: 2
<s>matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)
torch.cuda.synchronize()
print( f"bnb nf4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
torch.cuda.synchronize()
t0 = time.time()
for i in range(iters):
bnb.matmul_4bit(A, B_nf4_c.t(), quant_state=state_nf4_c)
torch.cuda.synchronize()
print( f"bnb nf4+DQ: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s" )
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.functional
get_special_format_str()
at: tests.test_functional
values = []
names = ["batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values]
at: torch._C
float16: dtype = ...
===========unchanged ref 1===========
at: torch._C._VariableFunctions
empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
===========unchanged ref 2===========
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device:</s>
|
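The benchmark in the record above brackets every timed loop with torch.cuda.synchronize() because CUDA kernel launches return before the work finishes. A minimal sketch of that timing pattern, with a hypothetical bench() helper standing in for the individual matmul variants being measured:

import time
import torch

def bench(run, iters=100, warmup=10):
    for _ in range(warmup):
        run()                     # warmup keeps one-time setup costs out of the timed region
    torch.cuda.synchronize()      # wait for all queued kernels before starting the clock
    t0 = time.time()
    for _ in range(iters):
        run()                     # launches are asynchronous
    torch.cuda.synchronize()      # wait again before reading the clock
    return time.time() - t0

A = torch.randn(1, 512, 4096, dtype=torch.float16, device="cuda")
B = torch.randn(4 * 4096, 4096, dtype=torch.float16, device="cuda")
print(f"fp16 matmul: {bench(lambda: torch.matmul(A, B.t())):.4f}s")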
bitsandbytes.autograd._functions/MatmulLtState.tile_indices
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5f492d437e4b156082a20c7fbb95422b7ac36c7d
|
Merge remote-tracking branch 'origin/inference'
|
<1>:<del> device = self.CxB.device
<2>:<del> transform = lambda x: F.transform(x.to(device), from_order="row", to_order=self.formatB)[0].to(x.device)
<3>:<del> with torch.no_grad():
<4>:<del> self._tile_indices = get_inverse_transform_indices(transform, self.get_tile_size()).to(device)
<5>:<add> self._tile_indices = get_tile_inds(self.formatB, self.CxB.device)
|
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
@property
def tile_indices(self):
<0> if self._tile_indices is None:
<1> device = self.CxB.device
<2> transform = lambda x: F.transform(x.to(device), from_order="row", to_order=self.formatB)[0].to(x.device)
<3> with torch.no_grad():
<4> self._tile_indices = get_inverse_transform_indices(transform, self.get_tile_size()).to(device)
<5> return self._tile_indices
<6>
|
===========unchanged ref 0===========
at: bitsandbytes.autograd._functions.MatmulLtState
_tile_indices: Optional[torch.Tensor] = None
force_no_igemmlt: bool = False
CB = None
CxB = None
SB = None
SCB = None
CxBt = None
SBt = None
CBt = None
subB = None
outlier_pool = None
has_accumulated_gradients = False
threshold = 0.0
idx = None
is_training = True
has_fp16_weights = True
memory_efficient_backward = False
use_pool = False
formatB = F.get_special_format_str()
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
- def get_tile_size(self):
- assert self.formatB in (
- "col_turing",
- "col_ampere",
- ), f"please find this assert and manually enter tile size for {self.formatB}"
- return (8, 32) if self.formatB == "col_turing" else (32, 32)
-
===========changed ref 1===========
# module: bitsandbytes.autograd._functions
+ def _get_tile_size(format):
+ assert format in (
+ "col_turing",
+ "col_ampere",
+ ), f"please find this assert and manually enter tile size for {format}"
+ return (8, 32) if format == "col_turing" else (32, 32)
+
===========changed ref 2===========
# module: bitsandbytes.autograd._functions
+ def get_tile_inds(format, device):
+ transform = lambda x: F.transform(x.to(device), from_order="row", to_order=format)[0].to(x.device)
+ with torch.no_grad():
+ return get_inverse_transform_indices(transform, _get_tile_size(format)).to(device)
+
|
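The change above only lifts the tile-size and tile-index helpers out of MatmulLtState so they can be computed from a format string and a device, without a populated state object. The trick behind get_inverse_transform_indices, recovering the inverse of a layout permutation by pushing position indices through the transform, can be sketched generically; reverse below is a toy permutation standing in for the real col_turing/col_ampere layouts:

import torch

def inverse_transform_indices(transform, numel):
    idx = torch.arange(numel)          # tag every position with its flat index
    permuted = transform(idx)          # see where the layout transform puts each one
    return torch.argsort(permuted)     # indices that map the transformed layout back to row-major

reverse = lambda x: x.flip(0)          # toy transform; the library runs F.transform(...) per tile
inds = inverse_transform_indices(reverse, 8)
x = torch.arange(8)
assert torch.equal(reverse(x)[inds], x)   # gathering with inds undoes the layout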
bitsandbytes.nn.modules/Embedding.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5f492d437e4b156082a20c7fbb95422b7ac36c7d
|
Merge remote-tracking branch 'origin/inference'
|
<9>:<add> device=device
|
<s>torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
+ device: Optional[device] = None,
) -> None:
<0> super().__init__(
<1> num_embeddings,
<2> embedding_dim,
<3> padding_idx,
<4> max_norm,
<5> norm_type,
<6> scale_grad_by_freq,
<7> sparse,
<8> _weight,
<9> )
<10> GlobalOptimManager.get_instance().register_module_override(
<11> self, "weight", {"optim_bits": 32}
<12> )
<13>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer
GlobalOptimManager()
at: bitsandbytes.optim.optimizer.GlobalOptimManager
_instance = None
get_instance()
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
at: torch.nn.modules.sparse.Embedding
__constants__ = ['num_embeddings', 'embedding_dim', 'padding_idx', 'max_norm',
'norm_type', 'scale_grad_by_freq', 'sparse']
num_embeddings: int
embedding_dim: int
padding_idx: Optional[int]
max_norm: Optional[float]
norm_type: float
scale_grad_by_freq: bool
weight: Tensor
freeze: bool
sparse: bool
__init__(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None, max_norm: Optional[float]=None, norm_type: float=2., scale_grad_by_freq: bool=False, sparse: bool=False, _weight: Optional[Tensor]=None, _freeze: bool=False, device=None, dtype=None) -> None
__init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None, max_norm: Optional[float]=None, norm_type: float=2., scale_grad_by_freq: bool=False, sparse: bool=False, _weight: Optional[Tensor]=None, _freeze: bool=False, device=None, dtype=None) -> None
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
+ def _get_tile_size(format):
+ assert format in (
+ "col_turing",
+ "col_ampere",
+ ), f"please find this assert and manually enter tile size for {format}"
+ return (8, 32) if format == "col_turing" else (32, 32)
+
===========changed ref 1===========
# module: bitsandbytes.autograd._functions
+ def get_tile_inds(format, device):
+ transform = lambda x: F.transform(x.to(device), from_order="row", to_order=format)[0].to(x.device)
+ with torch.no_grad():
+ return get_inverse_transform_indices(transform, _get_tile_size(format)).to(device)
+
===========changed ref 2===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
- def get_tile_size(self):
- assert self.formatB in (
- "col_turing",
- "col_ampere",
- ), f"please find this assert and manually enter tile size for {self.formatB}"
- return (8, 32) if self.formatB == "col_turing" else (32, 32)
-
===========changed ref 3===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
@property
def tile_indices(self):
if self._tile_indices is None:
- device = self.CxB.device
- transform = lambda x: F.transform(x.to(device), from_order="row", to_order=self.formatB)[0].to(x.device)
- with torch.no_grad():
- self._tile_indices = get_inverse_transform_indices(transform, self.get_tile_size()).to(device)
+ self._tile_indices = get_tile_inds(self.formatB, self.CxB.device)
return self._tile_indices
|
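The bitsandbytes-specific part of the Embedding record is the optimizer override, which keeps 32-bit optimizer state for the embedding weights even under an 8-bit optimizer. The same hook can be applied by hand; a hedged sketch (import paths follow the library's documented usage, and an 8-bit optimizer is assumed to be constructed afterwards over these parameters):

import torch
import bitsandbytes as bnb

emb = torch.nn.Embedding(50_000, 512)
# request full-precision optimizer state for this parameter
bnb.optim.GlobalOptimManager.get_instance().register_module_override(
    emb, "weight", {"optim_bits": 32}
)
opt = bnb.optim.Adam8bit(emb.parameters(), lr=1e-3)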
bitsandbytes.nn.modules/Linear4bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5f492d437e4b156082a20c7fbb95422b7ac36c7d
|
Merge remote-tracking branch 'origin/inference'
|
<0>:<add> super().__init__(input_features, output_features, bias, device)
<del> super().__init__(input_features, output_features, bias)
|
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4'):
<0> super().__init__(input_features, output_features, bias)
<1> self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
<2> self.compute_dtype = compute_dtype
<3>
|
===========unchanged ref 0===========
at: torch.nn.modules.linear
Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None)
at: torch.nn.modules.linear.Linear
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
__init__(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
__init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
===========changed ref 0===========
<s>torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
+ device: Optional[device] = None,
) -> None:
super().__init__(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
_weight,
+ device=device
)
GlobalOptimManager.get_instance().register_module_override(
self, "weight", {"optim_bits": 32}
)
===========changed ref 1===========
# module: bitsandbytes.autograd._functions
+ def _get_tile_size(format):
+ assert format in (
+ "col_turing",
+ "col_ampere",
+ ), f"please find this assert and manually enter tile size for {format}"
+ return (8, 32) if format == "col_turing" else (32, 32)
+
===========changed ref 2===========
# module: bitsandbytes.autograd._functions
+ def get_tile_inds(format, device):
+ transform = lambda x: F.transform(x.to(device), from_order="row", to_order=format)[0].to(x.device)
+ with torch.no_grad():
+ return get_inverse_transform_indices(transform, _get_tile_size(format)).to(device)
+
===========changed ref 3===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
- def get_tile_size(self):
- assert self.formatB in (
- "col_turing",
- "col_ampere",
- ), f"please find this assert and manually enter tile size for {self.formatB}"
- return (8, 32) if self.formatB == "col_turing" else (32, 32)
-
===========changed ref 4===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
@property
def tile_indices(self):
if self._tile_indices is None:
- device = self.CxB.device
- transform = lambda x: F.transform(x.to(device), from_order="row", to_order=self.formatB)[0].to(x.device)
- with torch.no_grad():
- self._tile_indices = get_inverse_transform_indices(transform, self.get_tile_size()).to(device)
+ self._tile_indices = get_tile_inds(self.formatB, self.CxB.device)
return self._tile_indices
|
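Linear4bit itself only wraps the blockwise 4-bit quantizer: the Params4bit weight ends up holding a packed 4-bit tensor plus a quantization state. The functional round trip behind it, using the same calls that appear in the tests above (keyword names may differ slightly between versions), looks roughly like this:

import torch
import bitsandbytes.functional as F

W = torch.randn(4096, 1024, dtype=torch.float16, device="cuda")
qW, state = F.quantize_4bit(W, quant_type="nf4", compress_statistics=True)
W_hat = F.dequantize_4bit(qW, state)              # approximate reconstruction
print((W - W_hat).abs().mean().item())            # small, non-zero quantization error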
bitsandbytes.nn.modules/LinearFP4.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5f492d437e4b156082a20c7fbb95422b7ac36c7d
|
Merge remote-tracking branch 'origin/inference'
|
<0>:<add> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
<del> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
|
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
<0> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
<1>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.modules
Linear4bit(input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None)
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4'):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
self.compute_dtype = compute_dtype
===========changed ref 1===========
<s>torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
+ device: Optional[device] = None,
) -> None:
super().__init__(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
_weight,
+ device=device
)
GlobalOptimManager.get_instance().register_module_override(
self, "weight", {"optim_bits": 32}
)
===========changed ref 2===========
# module: bitsandbytes.autograd._functions
+ def _get_tile_size(format):
+ assert format in (
+ "col_turing",
+ "col_ampere",
+ ), f"please find this assert and manually enter tile size for {format}"
+ return (8, 32) if format == "col_turing" else (32, 32)
+
===========changed ref 3===========
# module: bitsandbytes.autograd._functions
+ def get_tile_inds(format, device):
+ transform = lambda x: F.transform(x.to(device), from_order="row", to_order=format)[0].to(x.device)
+ with torch.no_grad():
+ return get_inverse_transform_indices(transform, _get_tile_size(format)).to(device)
+
===========changed ref 4===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
- def get_tile_size(self):
- assert self.formatB in (
- "col_turing",
- "col_ampere",
- ), f"please find this assert and manually enter tile size for {self.formatB}"
- return (8, 32) if self.formatB == "col_turing" else (32, 32)
-
===========changed ref 5===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
@property
def tile_indices(self):
if self._tile_indices is None:
- device = self.CxB.device
- transform = lambda x: F.transform(x.to(device), from_order="row", to_order=self.formatB)[0].to(x.device)
- with torch.no_grad():
- self._tile_indices = get_inverse_transform_indices(transform, self.get_tile_size()).to(device)
+ self._tile_indices = get_tile_inds(self.formatB, self.CxB.device)
return self._tile_indices
|
bitsandbytes.nn.modules/LinearNF4.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5f492d437e4b156082a20c7fbb95422b7ac36c7d
|
Merge remote-tracking branch 'origin/inference'
|
<0>:<add> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
<del> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
|
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
<0> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
<1>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.modules
Linear4bit(input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None)
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4'):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
self.compute_dtype = compute_dtype
===========changed ref 2===========
<s>torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
+ device: Optional[device] = None,
) -> None:
super().__init__(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
_weight,
+ device=device
)
GlobalOptimManager.get_instance().register_module_override(
self, "weight", {"optim_bits": 32}
)
===========changed ref 3===========
# module: bitsandbytes.autograd._functions
+ def _get_tile_size(format):
+ assert format in (
+ "col_turing",
+ "col_ampere",
+ ), f"please find this assert and manually enter tile size for {format}"
+ return (8, 32) if format == "col_turing" else (32, 32)
+
===========changed ref 4===========
# module: bitsandbytes.autograd._functions
+ def get_tile_inds(format, device):
+ transform = lambda x: F.transform(x.to(device), from_order="row", to_order=format)[0].to(x.device)
+ with torch.no_grad():
+ return get_inverse_transform_indices(transform, _get_tile_size(format)).to(device)
+
===========changed ref 5===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
- def get_tile_size(self):
- assert self.formatB in (
- "col_turing",
- "col_ampere",
- ), f"please find this assert and manually enter tile size for {self.formatB}"
- return (8, 32) if self.formatB == "col_turing" else (32, 32)
-
===========changed ref 6===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
@property
def tile_indices(self):
if self._tile_indices is None:
- device = self.CxB.device
- transform = lambda x: F.transform(x.to(device), from_order="row", to_order=self.formatB)[0].to(x.device)
- with torch.no_grad():
- self._tile_indices = get_inverse_transform_indices(transform, self.get_tile_size()).to(device)
+ self._tile_indices = get_tile_inds(self.formatB, self.CxB.device)
return self._tile_indices
|
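The device arguments added across Embedding, Linear4bit, LinearFP4 and LinearNF4 are pure plumbing into torch.nn's factory kwargs. What that enables at the plain nn.Linear level (no bitsandbytes specifics) is allocating parameters on a chosen device, including the storage-free meta device, at construction time:

import torch

default = torch.nn.Linear(1024, 4096)                  # parameters created on CPU
on_meta = torch.nn.Linear(1024, 4096, device="meta")   # shapes only, no storage allocated
print(default.weight.device, on_meta.weight.device)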
bitsandbytes.nn.modules/Linear8bitLt.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5f492d437e4b156082a20c7fbb95422b7ac36c7d
|
Merge remote-tracking branch 'origin/inference'
|
<0>:<add> super().__init__(input_features, output_features, bias, device)
<del> super().__init__(input_features, output_features, bias)
<12>:<add> self._register_load_state_dict_pre_hook(maybe_rearrange_weight)
|
# module: bitsandbytes.nn.modules
class Linear8bitLt(nn.Linear):
def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True,
+ memory_efficient_backward=False, threshold=0.0, index=None, device=None):
- memory_efficient_backward=False, threshold=0.0, index=None):
<0> super().__init__(input_features, output_features, bias)
<1> assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
<2> self.state = bnb.MatmulLtState()
<3> self.index = index
<4>
<5> self.state.threshold = threshold
<6> self.state.has_fp16_weights = has_fp16_weights
<7> self.state.memory_efficient_backward = memory_efficient_backward
<8> if threshold > 0.0 and not has_fp16_weights:
<9> self.state.use_pool = True
<10>
<11> self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
<12>
|
===========unchanged ref 0===========
at: bitsandbytes.autograd._functions
undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor
get_tile_inds(format, device)
at: torch.nn.modules.linear
Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None)
at: torch.nn.modules.linear.Linear
__init__(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
__init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
+ def get_tile_inds(format, device):
+ transform = lambda x: F.transform(x.to(device), from_order="row", to_order=format)[0].to(x.device)
+ with torch.no_grad():
+ return get_inverse_transform_indices(transform, _get_tile_size(format)).to(device)
+
===========changed ref 1===========
# module: bitsandbytes.nn.modules
+ def maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
+ weight = state_dict.get(f"{prefix}weight")
+ if weight is None:
+ # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing
+ return
+ weight_format = state_dict.pop(f"{prefix}weight_format", "row")
+
+ if weight_format != "row":
+ tile_indices = get_tile_inds(weight_format, weight.device)
+ state_dict[f"{prefix}weight"] = undo_layout(weight, tile_indices)
+
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4'):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
self.compute_dtype = compute_dtype
===========changed ref 5===========
<s>torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
+ device: Optional[device] = None,
) -> None:
super().__init__(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
_weight,
+ device=device
)
GlobalOptimManager.get_instance().register_module_override(
self, "weight", {"optim_bits": 32}
)
===========changed ref 6===========
# module: bitsandbytes.autograd._functions
+ def _get_tile_size(format):
+ assert format in (
+ "col_turing",
+ "col_ampere",
+ ), f"please find this assert and manually enter tile size for {format}"
+ return (8, 32) if format == "col_turing" else (32, 32)
+
===========changed ref 7===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
- def get_tile_size(self):
- assert self.formatB in (
- "col_turing",
- "col_ampere",
- ), f"please find this assert and manually enter tile size for {self.formatB}"
- return (8, 32) if self.formatB == "col_turing" else (32, 32)
-
===========changed ref 8===========
# module: bitsandbytes.autograd._functions
@dataclass
class MatmulLtState:
@property
def tile_indices(self):
if self._tile_indices is None:
- device = self.CxB.device
- transform = lambda x: F.transform(x.to(device), from_order="row", to_order=self.formatB)[0].to(x.device)
- with torch.no_grad():
- self._tile_indices = get_inverse_transform_indices(transform, self.get_tile_size()).to(device)
+ self._tile_indices = get_tile_inds(self.formatB, self.CxB.device)
return self._tile_indices
|
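The new _register_load_state_dict_pre_hook(maybe_rearrange_weight) call lets the module rewrite the incoming state dict (undoing a col_turing/col_ampere layout) before the normal key matching runs. A toy pre-hook with the same signature, applied to a made-up rescaling convention rather than a weight layout, shows the mechanics:

import torch

class Scaled(torch.nn.Linear):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._register_load_state_dict_pre_hook(self._rescale)

    @staticmethod
    def _rescale(state_dict, prefix, local_metadata, strict,
                 missing_keys, unexpected_keys, error_msgs):
        key = f"{prefix}weight"
        if key in state_dict:
            state_dict[key] = state_dict[key] / 2      # hypothetical: checkpoints store the weight doubled

m = Scaled(4, 4)
m.load_state_dict({"weight": torch.ones(4, 4) * 2, "bias": torch.zeros(4)})
assert torch.allclose(m.weight, torch.ones(4, 4))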
bitsandbytes.nn.modules/Linear8bitLt._save_to_state_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5f492d437e4b156082a20c7fbb95422b7ac36c7d
|
Merge remote-tracking branch 'origin/inference'
|
<0>:<del> if not self.state.has_fp16_weights and self.state.CB is None and self.state.CxB is not None:
<1>:<del> # reorder weight layout back from ampere/turing to row
<2>:<del> reorder_layout = True
<3>:<del> weight_clone = self.weight.data.clone()
<4>:<del> else:
<5>:<del> reorder_layout = False
<6>:<add> super()._save_to_state_dict(destination, prefix, keep_vars)
<7>:<del> try:
<8>:<del> if reorder_layout:
<9>:<del> self.weight.data = undo_layout(self.state.CxB, self.state.tile_indices)
<10>:<add> # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data
<add> scb_name = "SCB"
<11>:<add> # case 1: .cuda was called, SCB is in self.weight
<add> param_from_weight = getattr(self.weight, scb_name)
<add> # case 2: self.init_8bit_state was called, SCB is in self.state
<add> param_from_state = getattr(self.state, scb_name)
<add> # case 3: SCB is in self.state, weight layout reordered after first forward()
<add> layout_reordered = self.state.CxB is not None
<del> super()._save_to_state_dict(destination, prefix, keep_vars)
<13>:<add> key_name = prefix + f"{scb_name}"
<add> format_name = prefix + "weight_format"
<del> # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data
<14>:<del> weight_name = "SCB"
<16>:<del> # case 1: .cuda was called, SCB is in self.weight
<17>:<del> param_from_weight = getattr(self.weight, weight_name)
<18>:<del> # case 2: self.init_8bit_state was called, SCB is in self.state
<19>:<del> param_from_state = getattr(self.state, weight_name)
<20>:<del>
|
# module: bitsandbytes.nn.modules
class Linear8bitLt(nn.Linear):
def _save_to_state_dict(self, destination, prefix, keep_vars):
<0> if not self.state.has_fp16_weights and self.state.CB is None and self.state.CxB is not None:
<1> # reorder weight layout back from ampere/turing to row
<2> reorder_layout = True
<3> weight_clone = self.weight.data.clone()
<4> else:
<5> reorder_layout = False
<6>
<7> try:
<8> if reorder_layout:
<9> self.weight.data = undo_layout(self.state.CxB, self.state.tile_indices)
<10>
<11> super()._save_to_state_dict(destination, prefix, keep_vars)
<12>
<13> # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data
<14> weight_name = "SCB"
<15>
<16> # case 1: .cuda was called, SCB is in self.weight
<17> param_from_weight = getattr(self.weight, weight_name)
<18> # case 2: self.init_8bit_state was called, SCB is in self.state
<19> param_from_state = getattr(self.state, weight_name)
<20>
<21> key_name = prefix + f"{weight_name}"
<22> if param_from_weight is not None:
<23> destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach()
<24> elif not self.state.has_fp16_weights and param_from_state is not None:
<25> destination[key_name] = param_from_state if keep_vars else param_from_state.detach()
<26> finally:
<27> if reorder_layout:
<28> self.weight.data = weight_clone
<29>
|
===========unchanged ref 0===========
at: bitsandbytes.autograd._functions
MatmulLtState(_tile_indices: Optional[torch.Tensor]=None, force_no_igemmlt: bool=False)
at: bitsandbytes.autograd._functions.MatmulLtState
_tile_indices: Optional[torch.Tensor] = None
force_no_igemmlt: bool = False
CB = None
CxB = None
SB = None
SCB = None
CxBt = None
SBt = None
CBt = None
subB = None
outlier_pool = None
has_accumulated_gradients = False
threshold = 0.0
idx = None
is_training = True
has_fp16_weights = True
memory_efficient_backward = False
use_pool = False
formatB = F.get_special_format_str()
at: bitsandbytes.autograd._functions.MatmulLtState.reset_grads
self.CxB = None
at: bitsandbytes.nn.modules
Int8Params(data: Tensor=..., requires_grad: builtins.bool=...)
maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
at: bitsandbytes.nn.modules.Int8Params.cuda
self.data = CB
at: torch.nn.modules.linear.Linear
__init__(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
__init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
at: torch.nn.modules.module.Module
dump_patches: bool = False
_version: int = 1
training: bool
_parameters: Dict[str, Optional[Parameter]]
===========unchanged ref 1===========
_buffers: Dict[str, Optional[Tensor]]
_non_persistent_buffers_set: Set[str]
_backward_pre_hooks: Dict[int, Callable]
_backward_hooks: Dict[int, Callable]
_is_full_backward_hook: Optional[bool]
_forward_hooks: Dict[int, Callable]
_forward_hooks_with_kwargs: Dict[int, bool]
_forward_hooks_always_called: Dict[int, bool]
_forward_pre_hooks: Dict[int, Callable]
_forward_pre_hooks_with_kwargs: Dict[int, bool]
_state_dict_hooks: Dict[int, Callable]
_load_state_dict_pre_hooks: Dict[int, Callable]
_state_dict_pre_hooks: Dict[int, Callable]
_load_state_dict_post_hooks: Dict[int, Callable]
_modules: Dict[str, Optional['Module']]
call_super_init: bool = False
_compiled_call_impl : Optional[Callable] = None
forward: Callable[..., Any] = _forward_unimplemented
__call__ : Callable[..., Any] = _wrapped_call_impl
_save_to_state_dict(self, destination, prefix, keep_vars)
_save_to_state_dict(destination, prefix, keep_vars)
T_destination = TypeVar('T_destination', bound=Dict[str, Any])
_register_load_state_dict_pre_hook(hook, with_module=False)
===========changed ref 0===========
# module: bitsandbytes.nn.modules
+ def maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
+ weight = state_dict.get(f"{prefix}weight")
+ if weight is None:
+ # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing
+ return
+ weight_format = state_dict.pop(f"{prefix}weight_format", "row")
+
+ if weight_format != "row":
+ tile_indices = get_tile_inds(weight_format, weight.device)
+ state_dict[f"{prefix}weight"] = undo_layout(weight, tile_indices)
+
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4'):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
self.compute_dtype = compute_dtype
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Linear8bitLt(nn.Linear):
def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True,
+ memory_efficient_backward=False, threshold=0.0, index=None, device=None):
- memory_efficient_backward=False, threshold=0.0, index=None):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
self.state = bnb.MatmulLtState()
self.index = index
self.state.threshold = threshold
self.state.has_fp16_weights = has_fp16_weights
self.state.memory_efficient_backward = memory_efficient_backward
if threshold > 0.0 and not has_fp16_weights:
self.state.use_pool = True
self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
+ self._register_load_state_dict_pre_hook(maybe_rearrange_weight)
|
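The save path above drops two extra entries, SCB and weight_format, into the destination next to the regular parameters. The general pattern of smuggling a non-parameter tensor into the state dict this way can be sketched with a hypothetical scale value:

import torch

class WithScale(torch.nn.Linear):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.scale = torch.tensor(1.0)        # deliberately neither a Parameter nor a buffer

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)
        # store the extra tensor under the module prefix, like SCB above
        destination[prefix + "scale"] = self.scale if keep_vars else self.scale.detach()

m = WithScale(4, 4)
print(sorted(m.state_dict().keys()))          # ['bias', 'scale', 'weight']

Loading such a dict back with strict checking would flag the extra key as unexpected unless _load_from_state_dict consumes it, which is exactly what the next record does for SCB.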
bitsandbytes.nn.modules/Linear8bitLt._load_from_state_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5f492d437e4b156082a20c7fbb95422b7ac36c7d
|
Merge remote-tracking branch 'origin/inference'
|
<2>:<add> unexpected_copy = list(unexpected_keys)
<add>
<add> for key in unexpected_copy:
<del> for key in unexpected_keys:
<6>:<add> # buffers not yet initialized, can't access them directly without quantizing first
<del> # buffers not yet initialized, can't call them directly without
<12>:<add>
<add> if self.state.SCB is not None:
<add> self.state.SCB = self.weight.SCB
<add>
|
# module: bitsandbytes.nn.modules
class Linear8bitLt(nn.Linear):
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
<0> super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
<1> error_msgs)
<2> for key in unexpected_keys:
<3> input_name = key[len(prefix):]
<4> if input_name == "SCB":
<5> if self.weight.SCB is None:
<6> # buffers not yet initialized, can't call them directly without
<7> raise RuntimeError("Loading a quantized checkpoint into non-quantized Linear8bitLt is "
<8> "not supported. Please call module.cuda() before module.load_state_dict()")
<9>
<10> input_param = state_dict[key]
<11> self.weight.SCB.copy_(input_param)
<12> unexpected_keys.remove(key)
<13>
|
===========unchanged ref 0===========
at: bitsandbytes.autograd._functions.MatmulLtState
formatB = F.get_special_format_str()
at: bitsandbytes.nn.modules.Linear8bitLt.__init__
self.state = bnb.MatmulLtState()
at: bitsandbytes.nn.modules.Linear8bitLt._save_to_state_dict
param_from_weight = getattr(self.weight, scb_name)
param_from_state = getattr(self.state, scb_name)
layout_reordered = self.state.CxB is not None
key_name = prefix + f"{scb_name}"
format_name = prefix + "weight_format"
at: torch.nn.modules.module.Module
state_dict(self, *, prefix: str=..., keep_vars: bool=...) -> Dict[str, Any]
state_dict(self, *, destination: T_destination, prefix: str=..., keep_vars: bool=...) -> T_destination
_load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
_load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
===========changed ref 0===========
# module: bitsandbytes.nn.modules
+ def maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
+ weight = state_dict.get(f"{prefix}weight")
+ if weight is None:
+ # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing
+ return
+ weight_format = state_dict.pop(f"{prefix}weight_format", "row")
+
+ if weight_format != "row":
+ tile_indices = get_tile_inds(weight_format, weight.device)
+ state_dict[f"{prefix}weight"] = undo_layout(weight, tile_indices)
+
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4'):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
self.compute_dtype = compute_dtype
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Linear8bitLt(nn.Linear):
def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True,
+ memory_efficient_backward=False, threshold=0.0, index=None, device=None):
- memory_efficient_backward=False, threshold=0.0, index=None):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
self.state = bnb.MatmulLtState()
self.index = index
self.state.threshold = threshold
self.state.has_fp16_weights = has_fp16_weights
self.state.memory_efficient_backward = memory_efficient_backward
if threshold > 0.0 and not has_fp16_weights:
self.state.use_pool = True
self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
+ self._register_load_state_dict_pre_hook(maybe_rearrange_weight)
===========changed ref 5===========
<s>torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
+ device: Optional[device] = None,
) -> None:
super().__init__(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
_weight,
+ device=device
)
GlobalOptimManager.get_instance().register_module_override(
self, "weight", {"optim_bits": 32}
)
|
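The error message in the load path encodes the expected call order for int8 checkpoints: the layer must be quantized (via .cuda()) before load_state_dict() so that weight.SCB exists and the saved scale can be copied into it. A hedged usage sketch; the checkpoint path is a placeholder for a file produced by a quantized layer's state_dict():

import torch
import bitsandbytes as bnb

layer = bnb.nn.Linear8bitLt(4096, 4096, has_fp16_weights=False)
layer = layer.cuda()                          # quantizes the weight and allocates SCB
state_dict = torch.load("int8_layer.pt")      # placeholder checkpoint
layer.load_state_dict(state_dict)             # SCB from the checkpoint is copied in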
bitsandbytes.nn.modules/OutlierAwareLinear.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5f492d437e4b156082a20c7fbb95422b7ac36c7d
|
Merge remote-tracking branch 'origin/inference'
|
<0>:<add> super().__init__(input_features, output_features, bias, device)
<del> super().__init__(input_features, output_features, bias)
|
# module: bitsandbytes.nn.modules
class OutlierAwareLinear(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, device=None):
- def __init__(self, input_features, output_features, bias=True):
<0> super().__init__(input_features, output_features, bias)
<1> self.outlier_dim = None
<2> self.is_quantized = False
<3>
|
===========unchanged ref 0===========
at: torch._tensor.Tensor.__setstate__
self.data = state[0]
at: torch.nn.modules.linear.Linear.__init__
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
===========changed ref 0===========
# module: bitsandbytes.nn.modules
+ def maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
+ weight = state_dict.get(f"{prefix}weight")
+ if weight is None:
+ # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing
+ return
+ weight_format = state_dict.pop(f"{prefix}weight_format", "row")
+
+ if weight_format != "row":
+ tile_indices = get_tile_inds(weight_format, weight.device)
+ state_dict[f"{prefix}weight"] = undo_layout(weight, tile_indices)
+
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Linear8bitLt(nn.Linear):
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
error_msgs)
+ unexpected_copy = list(unexpected_keys)
+
+ for key in unexpected_copy:
- for key in unexpected_keys:
input_name = key[len(prefix):]
if input_name == "SCB":
if self.weight.SCB is None:
+ # buffers not yet initialized, can't access them directly without quantizing first
- # buffers not yet initialized, can't call them directly without
raise RuntimeError("Loading a quantized checkpoint into non-quantized Linear8bitLt is "
"not supported. Please call module.cuda() before module.load_state_dict()")
input_param = state_dict[key]
self.weight.SCB.copy_(input_param)
+
+ if self.state.SCB is not None:
+ self.state.SCB = self.weight.SCB
+
unexpected_keys.remove(key)
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4'):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
self.compute_dtype = compute_dtype
===========changed ref 5===========
# module: bitsandbytes.nn.modules
class Linear8bitLt(nn.Linear):
def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True,
+ memory_efficient_backward=False, threshold=0.0, index=None, device=None):
- memory_efficient_backward=False, threshold=0.0, index=None):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
self.state = bnb.MatmulLtState()
self.index = index
self.state.threshold = threshold
self.state.has_fp16_weights = has_fp16_weights
self.state.memory_efficient_backward = memory_efficient_backward
if threshold > 0.0 and not has_fp16_weights:
self.state.use_pool = True
self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
+ self._register_load_state_dict_pre_hook(maybe_rearrange_weight)
===========changed ref 6===========
<s>torch.nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
+ device: Optional[device] = None,
) -> None:
super().__init__(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
_weight,
+ device=device
)
GlobalOptimManager.get_instance().register_module_override(
self, "weight", {"optim_bits": 32}
)
|
bitsandbytes.nn.modules/SwitchBackLinearBnb.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
5f492d437e4b156082a20c7fbb95422b7ac36c7d
|
Merge remote-tracking branch 'origin/inference'
|
<1>:<add> input_features, output_features, bias, device
<del> input_features, output_features, bias
|
# module: bitsandbytes.nn.modules
class SwitchBackLinearBnb(nn.Linear):
def __init__(
self,
input_features,
output_features,
bias=True,
has_fp16_weights=True,
memory_efficient_backward=False,
threshold=0.0,
index=None,
+ device=None
):
<0> super().__init__(
<1> input_features, output_features, bias
<2> )
<3> self.state = bnb.MatmulLtState()
<4> self.index = index
<5>
<6> self.state.threshold = threshold
<7> self.state.has_fp16_weights = has_fp16_weights
<8> self.state.memory_efficient_backward = memory_efficient_backward
<9> if threshold > 0.0 and not has_fp16_weights:
<10> self.state.use_pool = True
<11>
<12> self.weight = Int8Params(
<13> self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights
<14> )
<15>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.modules.OutlierAwareLinear
quantize_weight(w, outlier_idx)
at: bitsandbytes.nn.modules.OutlierAwareLinear.__init__
self.outlier_dim = None
self.is_quantized = False
at: bitsandbytes.utils
OutlierTracer()
at: bitsandbytes.utils.OutlierTracer
_instance = None
get_instance()
at: torch._tensor.Tensor.__setstate__
self.data = state[0]
at: torch.nn.modules.linear
Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None)
at: torch.nn.modules.linear.Linear
__init__(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
__init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
forward(self, input: Tensor) -> Tensor
at: torch.nn.modules.linear.Linear.__init__
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class OutlierAwareLinear(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, device=None):
- def __init__(self, input_features, output_features, bias=True):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
self.outlier_dim = None
self.is_quantized = False
===========changed ref 1===========
# module: bitsandbytes.nn.modules
+ def maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
+ weight = state_dict.get(f"{prefix}weight")
+ if weight is None:
+ # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing
+ return
+ weight_format = state_dict.pop(f"{prefix}weight_format", "row")
+
+ if weight_format != "row":
+ tile_indices = get_tile_inds(weight_format, weight.device)
+ state_dict[f"{prefix}weight"] = undo_layout(weight, tile_indices)
+
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4')
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True,device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4')
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Linear8bitLt(nn.Linear):
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
error_msgs)
+ unexpected_copy = list(unexpected_keys)
+
+ for key in unexpected_copy:
- for key in unexpected_keys:
input_name = key[len(prefix):]
if input_name == "SCB":
if self.weight.SCB is None:
+ # buffers not yet initialized, can't access them directly without quantizing first
- # buffers not yet initialized, can't call them directly without
raise RuntimeError("Loading a quantized checkpoint into non-quantized Linear8bitLt is "
"not supported. Please call module.cuda() before module.load_state_dict()")
input_param = state_dict[key]
self.weight.SCB.copy_(input_param)
+
+ if self.state.SCB is not None:
+ self.state.SCB = self.weight.SCB
+
unexpected_keys.remove(key)
===========changed ref 5===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4'):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
self.compute_dtype = compute_dtype
===========changed ref 6===========
# module: bitsandbytes.nn.modules
class Linear8bitLt(nn.Linear):
def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True,
+ memory_efficient_backward=False, threshold=0.0, index=None, device=None):
- memory_efficient_backward=False, threshold=0.0, index=None):
+ super().__init__(input_features, output_features, bias, device)
- super().__init__(input_features, output_features, bias)
assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
self.state = bnb.MatmulLtState()
self.index = index
self.state.threshold = threshold
self.state.has_fp16_weights = has_fp16_weights
self.state.memory_efficient_backward = memory_efficient_backward
if threshold > 0.0 and not has_fp16_weights:
self.state.use_pool = True
self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
+ self._register_load_state_dict_pre_hook(maybe_rearrange_weight)
|
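The threshold argument threaded through Linear8bitLt and SwitchBackLinearBnb controls the mixed-precision decomposition: hidden dimensions whose activations exceed the threshold are treated as outliers and kept out of the int8 path (the benchmark record earlier plants such outliers with A[:, :, outliers] = 8.0). Only the selection step is sketched here, not the decomposition or the kernels:

import torch

threshold = 6.0
A = torch.randn(8, 512, 1024)                            # batch, seq, hidden
A[:, :, torch.randint(0, 1024, (5,))] = 8.0              # plant a few outlier features

flat = A.abs().view(-1, A.shape[-1])                     # collapse batch and sequence
outlier_idx = (flat > threshold).any(dim=0).nonzero().flatten()
print(outlier_idx)                                       # almost surely just the planted columns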
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
306f6b2362a8430bb407715ee5172a24893bad0f
|
Fixed accidental deletion of limits in kernel.
|
<15>:<add> #B = torch.randn(4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<add> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<del> B = torch.randn(4*dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<16>:<del> #B = torch.randn(1, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
|
<s>etrize("double_quant", [True, False], ids=['DQ_True', 'DQ_False'])
@pytest.mark.parametrize("storage_type", ['nf4', 'fp4'], ids=['nf4', 'fp4'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant):
<0> print('')
<1> for dim in [128, 256, 512, 1024, 2048, 4096]:
<2> #for dim in [4*1024]:
<3> #for dim in [1*16]:
<4> errs = []
<5> relerrs = []
<6> max_err = 0
<7> max_relerr = 0
<8>
<9> for i in range(100):
<10> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<11> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<12> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<13> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<14> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<15> B = torch.randn(4*dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<16> #B = torch.randn(1, dim+2, dtype=dtype, device='cuda')/math.sqrt(dim)
<17>
<18> #print('')
<19> #print(A)
<20> #print(B.t())
<21> #A[:, :-1] = 0
<22> #B[:, :-1] = 0
<23> #A.flatten()[:-1] = 0
<24> #B.flatten()[:-1] = 0
<25>
<26> qB, state = F.quantize_4bit(B,</s>
|
===========below chunk 0===========
<s>quant", [True, False], ids=['DQ_True', 'DQ_False'])
@pytest.mark.parametrize("storage_type", ['nf4', 'fp4'], ids=['nf4', 'fp4'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant):
# offset: 1
#F.dequantize_4bit(qB, state)
C3 = torch.matmul(A, B.t())
C2 = F.gemv_4bit(A, qB.t(), state=state)
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
#print(state)
#print(qB)
#print('')
#print(A)
#print(B)
#print('='*89)
#print(C3)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2).float()
mag = torch.abs(C1).float()+1e-5
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
#print(err)
errs.append(err)
relerrs.append(relerr)
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print('')</s>
===========below chunk 1===========
<s>quant", [True, False], ids=['DQ_True', 'DQ_False'])
@pytest.mark.parametrize("storage_type", ['nf4', 'fp4'], ids=['nf4', 'fp4'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant):
# offset: 2
<s>(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print('')
#print(dim, sum(errs)/len(errs)/math.sqrt(dim))
#print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
#print(dim, (max_err.item(), max_relerr.item()))
print(C1.flatten()[-20:])
print(C2.flatten()[-20:])
print(C3.flatten()[-20:])
print(sum(errs)/len(errs)/math.sqrt(dim) , dim)
print(sum(relerrs)/len(relerrs)/math.sqrt(dim) , dim)
if dtype == torch.float16:
assert sum(errs)/len(errs)/math.sqrt(dim) < 5e-5
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.0005
else:
assert sum(errs)/len(errs)/math.sqrt(dim) < 3e-4
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.003
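The thresholds above are divided by math.sqrt(dim) because, for roughly i.i.d. inputs, the absolute error of a length-dim dot product grows with the square root of dim; the scaling lets one threshold cover all tested sizes. A small sketch of the metric as the test computes it (helper name is illustrative):

import math
import torch

def normalized_errors(C_ref, C_test, dim):
    # Mean absolute and relative error, scaled by sqrt(dim) as in the asserts above.
    err = (C_ref - C_test).abs().float()
    relerr = err / (C_ref.abs().float() + 1e-5)
    return err.mean().item() / math.sqrt(dim), relerr.mean().item() / math.sqrt(dim)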
|
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
a26a321e07f0dc40c18745a7301dd2f3828fe99d
|
Removed debugging statement.
|
<s>etrize("double_quant", [True, False], ids=['DQ_True', 'DQ_False'])
@pytest.mark.parametrize("storage_type", ['nf4', 'fp4'], ids=['nf4', 'fp4'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant):
<0> print('')
<1> for dim in [128, 256, 512, 1024, 2048, 4096]:
<2> #for dim in [4*1024]:
<3> #for dim in [1*16]:
<4> errs = []
<5> relerrs = []
<6> max_err = 0
<7> max_relerr = 0
<8>
<9> for i in range(100):
<10> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<11> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<12> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<13> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<14> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<15> #B = torch.randn(4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<16> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<17>
<18> #print('')
<19> #print(A)
<20> #print(B.t())
<21> #A[:, :-1] = 0
<22> #B[:, :-1] = 0
<23> #A.flatten()[:-1] = 0
<24> #B.flatten()[:-1] = 0
<25>
<26> qB, state = F.quantize_4bit(B, quant_</s>
|
===========below chunk 0===========
<s>quant", [True, False], ids=['DQ_True', 'DQ_False'])
@pytest.mark.parametrize("storage_type", ['nf4', 'fp4'], ids=['nf4', 'fp4'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant):
# offset: 1
#F.dequantize_4bit(qB, state)
C3 = torch.matmul(A, B.t())
C2 = F.gemv_4bit(A, qB.t(), state=state)
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
#print(state)
#print(qB)
#print('')
#print(A)
#print(B)
#print('='*89)
#print(C3)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2).float()
mag = torch.abs(C1).float()+1e-5
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
#print(err)
errs.append(err)
relerrs.append(relerr)
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print('')</s>
===========below chunk 1===========
<s>quant", [True, False], ids=['DQ_True', 'DQ_False'])
@pytest.mark.parametrize("storage_type", ['nf4', 'fp4'], ids=['nf4', 'fp4'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant):
# offset: 2
<s>(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print('')
#print(dim, sum(errs)/len(errs)/math.sqrt(dim))
#print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
#print(dim, (max_err.item(), max_relerr.item()))
print(C1.flatten()[-20:])
print(C2.flatten()[-20:])
#print(C1.flatten())
#print(C2.flatten())
#print(C3.flatten()[-20:])
print(sum(errs)/len(errs)/math.sqrt(dim) , dim)
print(sum(relerrs)/len(relerrs)/math.sqrt(dim) , dim)
if dtype == torch.float16:
assert sum(errs)/len(errs)/math.sqrt(dim) < 5e-5
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.0005
elif dtype == torch.float32:
assert sum(errs)/len(errs)/math.sqrt(dim) < 5e-8
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 1e-8
elif dtype == torch.bfloat16:
assert sum(errs)/len(err</s>
===========below chunk 2===========
<s>quant", [True, False], ids=['DQ_True', 'DQ_False'])
@pytest.mark.parametrize("storage_type", ['nf4', 'fp4'], ids=['nf4', 'fp4'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant):
# offset: 3
<s>math.sqrt(dim) < 3e-4
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.003
|
|
bitsandbytes.autograd._functions/matmul_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
ba51d95d433ef2cd10e1e4bf3e325d5b50004ff9
|
Added more extensive gemv tests; blocksize guard for gemv.
|
<2>:<add> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
<add> if A.shape[-1] % blocksize != 0:
<add> warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
<add> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<add> else:
<add> return F.gemv_4bit(A, B.t(), out, state=quant_state)
<del> return F.gemv_4bit(A, B.t(), out, state=quant_state)
|
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
<0> assert quant_state is not None
<1> if A.numel() == A.shape[-1] and A.requires_grad == False:
<2> return F.gemv_4bit(A, B.t(), out, state=quant_state)
<3> else:
<4> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<5>
|
===========unchanged ref 0===========
at: bitsandbytes.autograd._functions
tensor = torch.Tensor
MatMul4Bit(*args, **kwargs)
at: bitsandbytes.functional
gemv_4bit(A: Tensor, B: Tensor, out: Tensor=None, transposed_A=False, transposed_B=False, state=None)
at: torch._tensor.Tensor.__setstate__
self.requires_grad, _, self._backward_hooks = state
at: typing
List = _alias(list, 1, inst=False, name='List')
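With this commit, the gemv fast path is only taken for a single-row (single-token) input, i.e. when A.numel() == A.shape[-1], A does not require grad, and the hidden dimension is a multiple of the quantization blocksize; everything else falls back to MatMul4Bit. A small illustration of the shape condition (values are illustrative):

import torch

A_token = torch.randn(1, 1, 4096)   # one token: numel() == shape[-1] -> eligible for the gemv path
A_batch = torch.randn(8, 4096)      # several rows: numel() != shape[-1] -> MatMul4Bit fallback
print(A_token.numel() == A_token.shape[-1])   # True
print(A_batch.numel() == A_batch.shape[-1])   # False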
|
tests.test_functional/test_fp4_quant
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
ba51d95d433ef2cd10e1e4bf3e325d5b50004ff9
|
Added more extensive gemv tests; blocksize guard for gemv.
|
<26>:<add> relerr = (err/(A1.abs().float()+1e-8)).mean()
<del> relerr = (err/A1.abs().float()).mean()
|
# module: tests.test_functional
#print((time.time()-t0)/1e6)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16], ids=["fp32", "fp16", "bf16"])
def test_fp4_quant(dtype):
<0> vals = list(product([0, 1], repeat=4))
<1>
<2> code = {}
<3> for bits in vals:
<4> result = 0
<5> bias = 3
<6> sign, e1, e2, p1 = bits
<7> idx = sign*8 + e1*4 + e2*2 + p1*1
<8> sign = -1.0 if sign else 1.0
<9> exp = e1*2 + e2*1
<10> if exp == 0:
<11> # sub-normal
<12> if p1 == 0: result = 0
<13> else: result = sign*0.0625
<14> else:
<15> # normal
<16> exp = 2**(-exp + bias + 1)
<17> frac = 1.5 if p1 else 1.0
<18> result = sign*exp*frac
<19> code[idx] = result
<20>
<21> A1 = torch.randn(1024, 1024, device='cuda', dtype=dtype)
<22> qa, SA = F.quantize_fp4(A1, blocksize=64)
<23> A2 = F.dequantize_fp4(qa, SA)
<24>
<25> err = (A1 - A2).abs().float()
<26> relerr = (err/A1.abs().float()).mean()
<27> idx = err > 1.0
<28> err = err.mean()
<29>
<30> assert A2.dtype == dtype
<31> assert err.item() < 0.1
<32> assert relerr.item() < 0.28
<33>
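To make the bit layout above concrete: with bias = 3, the pattern (sign, e1, e2, p1) = (0, 1, 0, 1) gives exp = 2 and frac = 1.5, hence a code value of 2**(-2 + 3 + 1) * 1.5 = 6 at index 5. Enumerating all positive patterns the same way yields {0, 0.0625, 2, 3, 4, 6, 8, 12}, with the sign bit mirroring these to negative values; this is the FP4 codebook the test compares against.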
|
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
+ absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
+ if A.shape[-1] % blocksize != 0:
+ warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+ else:
+ return F.gemv_4bit(A, B.t(), out, state=quant_state)
- return F.gemv_4bit(A, B.t(), out, state=quant_state)
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
|
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
ba51d95d433ef2cd10e1e4bf3e325d5b50004ff9
|
Added more extensive gemv tests; blocksize guard for gemv.
|
<0>:<del> print('')
<1>:<add> for dim in [128, 256, 512, 1024, 2048, 4096, 6144]:
<del> for dim in [128, 256, 512, 1024, 2048, 4096]:
<3>:<add> #for dim in [1*128]:
<del> #for dim in [1*16]:
<4>:<add> errs1 = []
<del> errs = []
<5>:<add> errs2 = []
<add> errs3 = []
<add> relerrs1 = []
<del> relerrs = []
<6>:<add> relerrs2 = []
<add> relerrs3 = []
<add> max_errs1 = []
<del> max_err = 0
<7>:<add> max_errs2 = []
<del> max_relerr = 0
<8>:<add> max_errs3 = []
<add>
<10>:<del> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<11>:<del> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<12>:<del> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<13>:<del> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<14>:<add> if kind == 'fc1':
<add> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<del> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<15>:<del> #B = torch.randn(4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<16>:<add> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<del> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<17>:<add> elif kind == 'fc2':
<add> A = torch.randn(1, 4*dim, dtype=dtype, device='cuda')
<add> B = torch.randn(dim
|
<s>', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ def test_gemv_4bit(dtype, storage_type, double_quant, kind):
- def test_gemv_4bit(dtype, storage_type, double_quant):
<0> print('')
<1> for dim in [128, 256, 512, 1024, 2048, 4096]:
<2> #for dim in [4*1024]:
<3> #for dim in [1*16]:
<4> errs = []
<5> relerrs = []
<6> max_err = 0
<7> max_relerr = 0
<8>
<9> for i in range(100):
<10> #A = torch.rand(2, 4092, dtype=dtype, device='cuda')
<11> #B = torch.rand(4*4092, 4092, dtype=dtype, device='cuda')
<12> #A = torch.rand(1, 4096, dtype=dtype, device='cuda')
<13> #B = torch.rand(4*4096, 4096, dtype=dtype, device='cuda')
<14> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<15> #B = torch.randn(4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<16> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<17>
<18> #print('')
<19> #print(A)
<20> #print(B.t())
<21> #A[:, :-1] = 0
<22> #B[:, :-1] = 0
<23> #A.flatten()[:-1] = 0
<24> #B.flatten()[:-1] = 0
<25>
<26> qB, state = F.quantize_4bit(B, quant_</s>
|
===========below chunk 0===========
<s> 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ def test_gemv_4bit(dtype, storage_type, double_quant, kind):
- def test_gemv_4bit(dtype, storage_type, double_quant):
# offset: 1
#F.dequantize_4bit(qB, state)
C3 = torch.matmul(A, B.t())
C2 = F.gemv_4bit(A, qB.t(), state=state)
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
#print(state)
#print(qB)
#print('')
#print(A)
#print(B)
#print('='*89)
#print(C3)
#print(C1.shape, C2.shape)
# tensor cores are non-deterministic
# so we need to analyze errors around the mean
# to test our implementation
err = torch.abs(C1-C2).float()
mag = torch.abs(C1).float()+1e-5
relerr = err/mag
max_err = max(err.max(), max_err)
max_relerr = max(relerr.max(), max_relerr)
err = err.mean().item()
relerr = relerr.mean().item()
#print(err)
errs.append(err)
relerrs.append(relerr)
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print('')</s>
===========below chunk 1===========
<s> 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ def test_gemv_4bit(dtype, storage_type, double_quant, kind):
- def test_gemv_4bit(dtype, storage_type, double_quant):
# offset: 2
<s>(C1, C2, 1e-5, 0.01, count=c, throw=False)
#print('')
#print(dim, sum(errs)/len(errs)/math.sqrt(dim))
#print(dim, sum(relerrs)/len(relerrs)/math.sqrt(dim))
#print(dim, (max_err.item(), max_relerr.item()))
print(C1.flatten()[-20:])
print(C2.flatten()[-20:])
#print(C1.flatten())
#print(C2.flatten())
#print(C3.flatten()[-20:])
print(sum(errs)/len(errs)/math.sqrt(dim) , dim)
print(sum(relerrs)/len(relerrs)/math.sqrt(dim) , dim)
if dtype == torch.float16:
assert sum(errs)/len(errs)/math.sqrt(dim) < 5e-5
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.0005
elif dtype == torch.float32:
assert sum(errs)/len(errs)/math.sqrt(dim) < 5e-8
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 1e-7
elif dtype == torch.bfloat16:
assert sum(errs)/len(err</s>
===========below chunk 2===========
<s> 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ def test_gemv_4bit(dtype, storage_type, double_quant, kind):
- def test_gemv_4bit(dtype, storage_type, double_quant):
# offset: 3
<s>math.sqrt(dim) < 3e-4
assert sum(relerrs)/len(relerrs)/math.sqrt(dim) < 0.003
===========changed ref 0===========
# module: tests.test_functional
#print((time.time()-t0)/1e6)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16], ids=["fp32", "fp16", "bf16"])
def test_fp4_quant(dtype):
vals = list(product([0, 1], repeat=4))
code = {}
for bits in vals:
result = 0
bias = 3
sign, e1, e2, p1 = bits
idx = sign*8 + e1*4 + e2*2 + p1*1
sign = -1.0 if sign else 1.0
exp = e1*2 + e2*1
if exp == 0:
# sub-normal
if p1 == 0: result = 0
else: result = sign*0.0625
else:
# normal
exp = 2**(-exp + bias + 1)
frac = 1.5 if p1 else 1.0
result = sign*exp*frac
code[idx] = result
A1 = torch.randn(1024, 1024, device='cuda', dtype=dtype)
qa, SA = F.quantize_fp4(A1, blocksize=64)
A2 = F.dequantize_fp4(qa, SA)
err = (A1 - A2).abs().float()
+ relerr = (err/(A1.abs().float()+1e-8)).mean()
- relerr = (err/A1.abs().float()).mean()
idx = err > 1.0
err = err.mean()
assert A2.dtype == dtype
assert err.item() < 0.1
assert relerr.item() < 0.28
===========changed ref 1===========
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
+ absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
+ if A.shape[-1] % blocksize != 0:
+ warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
+ return MatMul4Bit.apply(A, B, out, bias, quant_state)
+ else:
+ return F.gemv_4bit(A, B.t(), out, state=quant_state)
- return F.gemv_4bit(A, B.t(), out, state=quant_state)
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
|
tests.test_generation/test_pi
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dc96e9e7c8d0c5f32083af546a0949b41f8f5fef
|
Test for bloom that fails with inference kernels.
|
<0>:<add> model, tokenizer = model_and_tokenizer
<add>
<1>:<add> max_new_tokens=20,
<del> max_new_tokens=128,
<6>:<add> generation_config.max_new_tokens = 20
<del> generation_config.max_new_tokens = 50
<12>:<add> n_cases = 3
<13>:<add> if hasattr(model.config, 'quantization_config'):
<add> model.config.quantization_config.bnb_4bit_compute_dtype = dtype
<del> model.config.quantization_config.bnb_4bit_compute_dtype = dtype
<15>:<add> if not inference_kernel:
<add> text = [text]*n_cases
<16>:<add> x = inputs['input_ids']
<add> failure_count = 0
<add> outputs = []
<add> if inference_kernel:
<add> for i in range(n_cases):
<add> output = model.generate(x, generation_config=generation_config)
<del> outputs = model.generate(inputs=inputs['input_ids'], generation_config=generation_config)
<17>:<add> textout = tokenizer.decode(output[0], skip_special_tokens=True)
<del> textout = tokenizer.decode(outputs[0], skip_special_tokens=True)
<18>:<add> outputs.append(textout)
<add> else:
<add> outputs = model.generate(x, generation_config=generation_config)
<add> outputs =
|
# module: tests.test_generation
+ @pytest.mark.parametrize("inference_kernel", [True, False], ids=['inference_kernel_True', 'inference_kernel_False'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ def test_pi(model_and_tokenizer, dtype, inference_kernel):
- def test_pi(model, tokenizer, dtype):
<0> generation_config = transformers.GenerationConfig(
<1> max_new_tokens=128,
<2> do_sample=True,
<3> top_p=0.9,
<4> temperature=0.7,
<5> )
<6> generation_config.max_new_tokens = 50
<7>
<8>
<9> #text = 'Please write down the first 50 digits of pi.'
<10> #text = get_prompt_for_generation_eval(text)
<11> #text += ' Sure, here the first 50 digits of pi: 3.14159'
<12> text = '3.14159'
<13> model.config.quantization_config.bnb_4bit_compute_dtype = dtype
<14>
<15> inputs = tokenizer(text, return_tensors="pt").to('cuda:0')
<16> outputs = model.generate(inputs=inputs['input_ids'], generation_config=generation_config)
<17> textout = tokenizer.decode(outputs[0], skip_special_tokens=True)
<18> print('')
<19> print(textout)
<20> print(math.pi)
<21>
<22> assert textout[:len(str(math.pi))] == str(math.pi)
<23>
|
===========unchanged ref 0===========
at: _pytest.fixtures
fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[
Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
]=..., name: Optional[str]=...) -> FixtureFunction
fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[
Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
]=..., name: Optional[str]=None) -> FixtureFunctionMarker
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: tests.test_generation
values = list(product(models, dtypes))
ids = ['_'.join(strfunc(x)) for x in values]
at: tests.test_generation.model_and_tokenizer
model, tokenizer = get_model_and_tokenizer(request.param)
at: torch._C
float32: dtype = ...
float16: dtype = ...
bfloat16: dtype = ...
at: transformers.generation.configuration_utils
GenerationConfig(**kwargs)
at: transformers.generation.configuration_utils.GenerationConfig.__init__
self.max_new_tokens = kwargs.pop("max_new_tokens", None)
===========changed ref 0===========
# module: tests.test_generation
- @pytest.fixture(scope='session')
- def tokenizer():
- tokenizer = transformers.AutoTokenizer.from_pretrained(name_or_path)
- return tokenizer
-
===========changed ref 1===========
# module: tests.test_generation
+ @pytest.fixture(scope='session', params=values, ids=ids)
+ def model_and_tokenizer(request):
+ model, tokenizer = get_model_and_tokenizer(request.param)
+ yield model, tokenizer
+ del model
+
===========changed ref 2===========
# module: tests.test_generation
- #name_or_path = 'AI-Sweden/gpt-sw3-126m'
-
- @pytest.fixture(scope='session')
- def model():
- bnb_config = get_4bit_config()
- bnb_config.bnb_4bit_compute_dtype=torch.float32
- bnb_config.load_in_4bit=True
- model = get_model(name_or_path)
- print('')
- return model
-
===========changed ref 3===========
# module: tests.test_generation
- def get_model(model_name_or_path='huggyllama/llama-7b', bnb_config=get_4bit_config()):
- model = AutoModelForCausalLM.from_pretrained(
- model_name_or_path,
- quantization_config=bnb_config,
- max_memory={0:'48GB'},
- device_map='auto'
- ).eval()
-
- return model
-
===========changed ref 4===========
# module: tests.test_generation
+ models = ['huggyllama/llama-7b', 'bigscience/bloom-1b7']
+ dtypes = ['nf4', 'fp4', '16bit']
+ load_in_4bit = [True, False]
+ values = list(product(models, dtypes))
+ strfunc = lambda lst: [str(x) for x in lst]
+ ids = ['_'.join(strfunc(x)) for x in values]
- name_or_path = 'huggyllama/llama-7b'
===========changed ref 5===========
# module: tests.test_generation
+ def get_model_and_tokenizer(config):
+ model_name_or_path, quant_type = config
+ bnb_config = get_4bit_config()
+ if quant_type == '16bit':
+ bnb_config.load_in_4bit = False
+ else:
+ bnb_config.bnb_4bit_quant_type= quant_type
+ model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
+ quantization_config=bnb_config,
+ max_memory={0:'48GB'},
+ device_map='auto',
+ torch_dtype=torch.bfloat16
+ ).eval()
+
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path)
+
+ return model, tokenizer
+
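For context, a roughly equivalent quantization config to get_4bit_config() can be spelled out directly with transformers' BitsAndBytesConfig; which fields get_4bit_config() sets by default is an assumption here:

import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type='nf4',             # or 'fp4', as parametrized above
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)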
|
bitsandbytes.autograd._functions/matmul_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
90b0ac57b0d8d8f996126deb8bba6b7dc75b4327
|
Fixed missing bias in bnb.matmul_4bit for inference; more tests.
|
<7>:<add> out = F.gemv_4bit(A, B.t(), out, state=quant_state)
<del> return F.gemv_4bit(A, B.t(), out, state=quant_state)
<8>:<add> if bias is not None:
<add> out += bias
<add> return out
|
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
<0> assert quant_state is not None
<1> if A.numel() == A.shape[-1] and A.requires_grad == False:
<2> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
<3> if A.shape[-1] % blocksize != 0:
<4> warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
<5> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<6> else:
<7> return F.gemv_4bit(A, B.t(), out, state=quant_state)
<8> else:
<9> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<10>
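With this change, a hypothetical end-to-end call looks as follows; shapes and values are illustrative, and a CUDA device plus the API as it appears in these commits is assumed:

import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

W = torch.randn(512, 128, dtype=torch.float16, device='cuda') / 128**0.5   # a 128 -> 512 projection
bias = torch.randn(512, dtype=torch.float16, device='cuda')
x = torch.randn(1, 128, dtype=torch.float16, device='cuda')                # single token -> gemv fast path

qW, state = F.quantize_4bit(W, quant_type='nf4')
y = bnb.matmul_4bit(x, qW.t(), state, bias=bias)   # bias is now added after the gemv kernel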
| |
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
90b0ac57b0d8d8f996126deb8bba6b7dc75b4327
|
Fixed missing bias in bnb.matmul_4bit for inference; more tests.
|
<0>:<add> for dim in [128, 256, 512, 1024]:
<del> for dim in [128, 256, 512, 1024, 2048, 4096, 6144]:
|
<s>4', 'fp4'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
<0> for dim in [128, 256, 512, 1024, 2048, 4096, 6144]:
<1> #for dim in [4*1024]:
<2> #for dim in [1*128]:
<3> errs1 = []
<4> errs2 = []
<5> errs3 = []
<6> relerrs1 = []
<7> relerrs2 = []
<8> relerrs3 = []
<9> max_errs1 = []
<10> max_errs2 = []
<11> max_errs3 = []
<12>
<13>
<14> for i in range(100):
<15> if kind == 'fc1':
<16> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<17> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<18> elif kind == 'fc2':
<19> A = torch.randn(1, 4*dim, dtype=dtype, device='cuda')
<20> B = torch.randn(dim, 4*dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<21> elif kind == 'attn':
<22> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<23> B = torch.randn(dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<24> elif kind == 'attn_packed':
<25> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<26> B = torch.randn(dim*3,</s>
|
===========below chunk 0===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 1
qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=double_quant)
C3 = torch.matmul(A, B.t())
C2 = F.gemv_4bit(A, qB.t(), state=state)
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
err1 = (C1-C2).abs().float()
err2 = (C3-C2).abs().float()
err3 = (C3-C1).abs().float()
mag1 = torch.abs(C1).float()+1e-5
mag2 = torch.abs(C3).float()+1e-5
mag3 = torch.abs(C3).float()+1e-5
relerr1 = err1/mag1
relerr2 = err2/mag2
relerr3 = err3/mag3
max_err1 = err1.max()
max_err2 = err2.max()
max_err3 = err3.max()
errs1.append(err1.mean().item())
errs2.append(err2.mean().item())
errs3.append(err3.mean().item())
relerrs1.append(relerr1.mean().item())
relerrs2.append(relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_</s>
===========below chunk 1===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 2
<s>relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_errs1.append(max_err1.item())
max_errs2.append(max_err2.item())
max_errs3.append(max_err3.item())
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
err1 = sum(errs1)/len(errs1)/math.sqrt(dim)
err2 = sum(errs2)/len(errs2)/math.sqrt(dim)
err3 = sum(errs3)/len(errs3)/math.sqrt(dim)
relerr1 = sum(relerrs1)/len(relerrs1)/math.sqrt(dim)
relerr2 = sum(relerrs2)/len(relerrs2)/math.sqrt(dim)
relerr3 = sum(relerrs3)/len(relerrs3)/math.sqrt(dim)
maxerr1 = sum(max_errs1)/len(max_errs1)/math.sqrt(dim)
maxerr2 = sum(max_errs2)/len(max_errs2)/math.sqrt(dim)
maxerr3 = sum(max</s>
===========below chunk 2===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 3
<s>s3)/len(max_errs3)/math.sqrt(dim)
absratio = err2/err3
relratio = relerr2/relerr3
maxratio = relerr2/relerr3
# for debugging if the tests fails
#
#print('='*80)
#print(f'For matmul: {A.shape}, {B.shape}, {kind}, {dtype}, {storage_type}, double_quant={double_quant}:')
#print(C1.flatten()[-20:])
#print(C2.flatten()[-20:])
#print(f'inference vs training abs: {err1}')
#print(f'inference vs training rel: {relerr1}')
#print(f'inference vs training max: {maxerr1}')
#print(f'inference vs training vs torch err ratio abs: {absratio}')
#print(f'inference vs training vs torch err ratio rel: {relratio}')
#print(f'inference vs training vs torch err ratio max: {maxratio}')
if dtype == torch.float16:
if dim <= 512:
assert err1 < 7e-5
assert relerr1 < 0.0008
else:
assert err1 < 6e-5
assert relerr1 < 2e-4
assert absratio < 1.005 and absratio > 0.995
assert relratio <</s>
===========below chunk 3===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 4
<s>005 and relratio > 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.float32:
if dim <= 512:
assert err1 < 5e-8
assert relerr1 < 1e-6
assert maxerr1 < 1e-7
else:
assert err1 < 5e-8
assert relerr1 < 8e-6
assert maxerr1 < 1e-7
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.005 and relratio > 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.bfloat16:
if dim <= 512:
assert err1 < 5e-4
assert relerr1 < 0.007
assert maxerr1 < 0.015
else:
assert err1 < 2e-4
assert relerr1 < 0.002
assert maxerr1 < 0.0012
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.04 and relratio > 0.96
assert maxratio < 1.02 and maxratio > 0.98
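For reference, the matrix shapes this test exercises per kind, with A the activation and B the weight that gets 4-bit quantized (the product is A @ B.t()):

def gemv_test_shapes(dim):
    # (A shape, B shape) per `kind`, as constructed in the test above
    return {
        'fc1':         ((1, dim),     (4 * dim, dim)),
        'fc2':         ((1, 4 * dim), (dim, 4 * dim)),
        'attn':        ((1, dim),     (dim, dim)),
        'attn_packed': ((1, dim),     (3 * dim, dim)),
    }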
|
tests.test_generation/model_and_tokenizer
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
90b0ac57b0d8d8f996126deb8bba6b7dc75b4327
|
Fixed missing bias in bnb.matmul_4bit for inference; more tests.
|
<1>:<add> yield request.param, model, tokenizer
<del> yield model, tokenizer
|
# module: tests.test_generation
@pytest.fixture(scope='session', params=values, ids=ids)
def model_and_tokenizer(request):
<0> model, tokenizer = get_model_and_tokenizer(request.param)
<1> yield model, tokenizer
<2> del model
<3>
|
===========unchanged ref 0===========
at: _pytest.fixtures
fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[
Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
]=..., name: Optional[str]=...) -> FixtureFunction
fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[
Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
]=..., name: Optional[str]=None) -> FixtureFunctionMarker
at: tests.test_generation
get_model_and_tokenizer(config)
values = list(product(models, dtypes))
ids = ['_'.join(strfunc(x)) for x in values]
===========changed ref 0===========
# module: tests.test_generation
models = ['huggyllama/llama-7b', 'bigscience/bloom-1b7']
+ dtypes = ['nf4', 'fp4']
- dtypes = ['nf4', 'fp4', '16bit']
load_in_4bit = [True, False]
values = list(product(models, dtypes))
strfunc = lambda lst: [str(x) for x in lst]
ids = ['_'.join(strfunc(x)) for x in values]
===========changed ref 1===========
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
if A.shape[-1] % blocksize != 0:
warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
return MatMul4Bit.apply(A, B, out, bias, quant_state)
else:
+ out = F.gemv_4bit(A, B.t(), out, state=quant_state)
- return F.gemv_4bit(A, B.t(), out, state=quant_state)
+ if bias is not None:
+ out += bias
+ return out
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
===========changed ref 2===========
<s>.parametrize("storage_type", ['nf4', 'fp4'], ids=['nf4', 'fp4'])
+ @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ @pytest.mark.parametrize("double_quant", [False], ids=['DQ_True'])
+ def test_gemv_eye_4bit(storage_type, dtype, double_quant):
+ dims = 10
+ torch.random.manual_seed(np.random.randint(0, 412424242))
+ dims = torch.randint(0, 8192, size=(dims,)).tolist()
+ dims = [dim + (64-(dim % 64)) for dim in dims]
+ #for dim in [576, 5120, 3520, 5184, 1280, 4992, 5312, 2048]:
+ for dim in dims:
+ A = torch.normal(0, 0.1, size=(1, 1, dim), dtype=dtype, device='cuda')
+ B = torch.eye(dim, dtype=dtype, device='cuda')
+
+ qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=double_quant)
+ C3 = torch.matmul(A, B.t())
+ C2 = bnb.matmul_4bit(A, qB.t(), state)
+ A.requires_grad = True
+ C1 = bnb.matmul_4bit(A, qB.t(), state)
+
+ torch.testing.assert_close(A, C3)
+ torch.testing.assert_close(A, C1)
+ torch.testing.assert_close(A, C2)
+
===========changed ref 3===========
<s>4', 'fp4'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
+ for dim in [128, 256, 512, 1024]:
- for dim in [128, 256, 512, 1024, 2048, 4096, 6144]:
#for dim in [4*1024]:
#for dim in [1*128]:
errs1 = []
errs2 = []
errs3 = []
relerrs1 = []
relerrs2 = []
relerrs3 = []
max_errs1 = []
max_errs2 = []
max_errs3 = []
for i in range(100):
if kind == 'fc1':
A = torch.randn(1, dim, dtype=dtype, device='cuda')
B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
elif kind == 'fc2':
A = torch.randn(1, 4*dim, dtype=dtype, device='cuda')
B = torch.randn(dim, 4*dim, dtype=dtype, device='cuda')/math.sqrt(dim)
elif kind == 'attn':
A = torch.randn(1, dim, dtype=dtype, device='cuda')
B = torch.randn(dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
elif kind == 'attn_packed':
A = torch.randn(1, dim, dtype=dtype, device='cuda')
B = torch.randn(dim*3, dim, dtype=dtype, device='cuda')</s>
|
tests.test_generation/test_pi
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
90b0ac57b0d8d8f996126deb8bba6b7dc75b4327
|
Fixed missing bias in bnb.matmul_4bit for inference; more tests.
|
<0>:<add> print('')
<add> dtype = torch.float16
<add>
<add> fixture_config, model, tokenizer = model_and_tokenizer
<del> model, tokenizer = model_and_tokenizer
<14>:<add> n_cases = 6
<del> n_cases = 3
<18>:<add> model.config.quantization_config.bnb_4bit_use_double_quant = DQ
<23>:<del> failure_count = 0
|
<s>.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
- @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ def test_pi(model_and_tokenizer, inference_kernel, DQ):
- def test_pi(model_and_tokenizer, dtype, inference_kernel):
<0> model, tokenizer = model_and_tokenizer
<1>
<2> generation_config = transformers.GenerationConfig(
<3> max_new_tokens=20,
<4> do_sample=True,
<5> top_p=0.9,
<6> temperature=0.7,
<7> )
<8> generation_config.max_new_tokens = 20
<9>
<10>
<11> #text = 'Please write down the first 50 digits of pi.'
<12> #text = get_prompt_for_generation_eval(text)
<13> #text += ' Sure, here the first 50 digits of pi: 3.14159'
<14> n_cases = 3
<15> text = '3.14159'
<16> if hasattr(model.config, 'quantization_config'):
<17> model.config.quantization_config.bnb_4bit_compute_dtype = dtype
<18>
<19> if not inference_kernel:
<20> text = [text]*n_cases
<21> inputs = tokenizer(text, return_tensors="pt").to('cuda:0')
<22> x = inputs['input_ids']
<23> failure_count = 0
<24> outputs = []
<25> if inference_kernel:
<26> for i in range(n_cases):
<27> output = model.generate(x, generation_config=generation_config)
<28> textout = tokenizer.decode(output[0], skip_special_tokens=True)
<29> outputs.append(textout)
<30> else:
<31> outputs = model.generate(x, generation_config=generation_config)
<32> outputs = [tokenizer.decode(output,</s>
|
===========below chunk 0===========
<s>dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
- @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ def test_pi(model_and_tokenizer, inference_kernel, DQ):
- def test_pi(model_and_tokenizer, dtype, inference_kernel):
# offset: 1
assert len(outputs) == n_cases
for i in range(n_cases):
if not outputs[i][:len(str(math.pi))] == str(math.pi):
failure_count += 1
if failure_count > 1:
print(math.pi)
for out in outputs:
print(out)
raise ValueError(f'Failure count: {failure_count}/{n_cases}')
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: math
pi: float
at: torch._C
float16: dtype = ...
at: transformers.generation.configuration_utils
GenerationConfig(**kwargs)
at: transformers.generation.configuration_utils.GenerationConfig.__init__
self.max_new_tokens = kwargs.pop("max_new_tokens", None)
===========changed ref 0===========
# module: tests.test_generation
@pytest.fixture(scope='session', params=values, ids=ids)
def model_and_tokenizer(request):
model, tokenizer = get_model_and_tokenizer(request.param)
+ yield request.param, model, tokenizer
- yield model, tokenizer
del model
===========changed ref 1===========
# module: tests.test_generation
models = ['huggyllama/llama-7b', 'bigscience/bloom-1b7']
+ dtypes = ['nf4', 'fp4']
- dtypes = ['nf4', 'fp4', '16bit']
load_in_4bit = [True, False]
values = list(product(models, dtypes))
strfunc = lambda lst: [str(x) for x in lst]
ids = ['_'.join(strfunc(x)) for x in values]
===========changed ref 2===========
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
if A.shape[-1] % blocksize != 0:
warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
return MatMul4Bit.apply(A, B, out, bias, quant_state)
else:
+ out = F.gemv_4bit(A, B.t(), out, state=quant_state)
- return F.gemv_4bit(A, B.t(), out, state=quant_state)
+ if bias is not None:
+ out += bias
+ return out
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
===========changed ref 3===========
<s>.parametrize("storage_type", ['nf4', 'fp4'], ids=['nf4', 'fp4'])
+ @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ @pytest.mark.parametrize("double_quant", [False], ids=['DQ_True'])
+ def test_gemv_eye_4bit(storage_type, dtype, double_quant):
+ dims = 10
+ torch.random.manual_seed(np.random.randint(0, 412424242))
+ dims = torch.randint(0, 8192, size=(dims,)).tolist()
+ dims = [dim + (64-(dim % 64)) for dim in dims]
+ #for dim in [576, 5120, 3520, 5184, 1280, 4992, 5312, 2048]:
+ for dim in dims:
+ A = torch.normal(0, 0.1, size=(1, 1, dim), dtype=dtype, device='cuda')
+ B = torch.eye(dim, dtype=dtype, device='cuda')
+
+ qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=double_quant)
+ C3 = torch.matmul(A, B.t())
+ C2 = bnb.matmul_4bit(A, qB.t(), state)
+ A.requires_grad = True
+ C1 = bnb.matmul_4bit(A, qB.t(), state)
+
+ torch.testing.assert_close(A, C3)
+ torch.testing.assert_close(A, C1)
+ torch.testing.assert_close(A, C2)
+
===========changed ref 4===========
<s>4', 'fp4'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
+ for dim in [128, 256, 512, 1024]:
- for dim in [128, 256, 512, 1024, 2048, 4096, 6144]:
#for dim in [4*1024]:
#for dim in [1*128]:
errs1 = []
errs2 = []
errs3 = []
relerrs1 = []
relerrs2 = []
relerrs3 = []
max_errs1 = []
max_errs2 = []
max_errs3 = []
for i in range(100):
if kind == 'fc1':
A = torch.randn(1, dim, dtype=dtype, device='cuda')
B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
elif kind == 'fc2':
A = torch.randn(1, 4*dim, dtype=dtype, device='cuda')
B = torch.randn(dim, 4*dim, dtype=dtype, device='cuda')/math.sqrt(dim)
elif kind == 'attn':
A = torch.randn(1, dim, dtype=dtype, device='cuda')
B = torch.randn(dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
elif kind == 'attn_packed':
A = torch.randn(1, dim, dtype=dtype, device='cuda')
B = torch.randn(dim*3, dim, dtype=dtype, device='cuda')</s>
|
bitsandbytes.cuda_setup.main/CUDASetup.generate_instructions
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
91c4fd844b715bba8d21e9bd94a27edb1d5bd20c
|
add public git repo URL
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
<0> if getattr(self, 'error', False): return
<1> print(self.error)
<2> self.error = True
<3> if self.cuda is None:
<4> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
<5> self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
<6> self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
<7> self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
<8> self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
<9> self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
<10> return
<11>
<12> if self.cudart_path is None:
<13> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
<14> self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
<15> self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
<16> self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(</s>
===========below chunk 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s>LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone [email protected]:TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
|
|
bitsandbytes.cuda_setup.main/CUDASetup.generate_instructions
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<3>:<add> if not self.cuda_available:
<del> if self.cuda is None:
<4>:<add> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.')
<del> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
<10>:<add> self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so* and follow with step (2b)')
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
<0> if getattr(self, 'error', False): return
<1> print(self.error)
<2> self.error = True
<3> if self.cuda is None:
<4> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
<5> self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
<6> self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
<7> self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
<8> self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
<9> self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
<10> return
<11>
<12> if self.cudart_path is None:
<13> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
<14> self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
<15> self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
<16> self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(</s>
===========below chunk 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s>LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone https://github.com/TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
|
bitsandbytes.cuda_setup.main/CUDASetup.run_cuda_setup
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<3>:<add> binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
<del> binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
<5>:<add> self.cuda_available = torch.cuda.is_available()
<del> self.cuda = cuda
<8>:<add> self.binary_name = binary_name
<add> self.manual_override()
<10>:<add> binary_path = package_dir / self.binary_name
<del> binary_path = package_dir / binary_name
<11>:<del>
<12>:<del> print('bin', binary_path)
<24>:<add> self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
<add> '"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
<add> self.add_log_entry('2. CUDA driver not installed')
<del> self.add_log_entry('1. CUDA driver not installed')
<25>:<add> self.add_log_entry('3. CUDA not installed')
<del> self.add_log_entry('2. CUDA not installed')
<26>:<add> self.add_log_entry('4. You have multiple conflicting CUDA libraries')
<del> self.add_log_entry('3. You have multiple conflicting CUDA libraries')
<27>:<add> self.add_log_entry
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
<0> self.initialized = True
<1> self.cuda_setup_log = []
<2>
<3> binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
<4> self.cudart_path = cudart_path
<5> self.cuda = cuda
<6> self.cc = cc
<7> self.cuda_version_string = cuda_version_string
<8>
<9> package_dir = Path(__file__).parent.parent
<10> binary_path = package_dir / binary_name
<11>
<12> print('bin', binary_path)
<13>
<14> try:
<15> if not binary_path.exists():
<16> self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
<17> legacy_binary_name = "libbitsandbytes_cpu.so"
<18> self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
<19> binary_path = package_dir / legacy_binary_name
<20> if not binary_path.exists() or torch.cuda.is_available():
<21> self.add_log_entry('')
<22> self.add_log_entry('='*48 + 'ERROR' + '='*37)
<23> self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
<24> self.add_log_entry('1. CUDA driver not installed')
<25> self.add_log_entry('2. CUDA not installed')
<26> self.add_log_entry('3. You have multiple conflicting CUDA libraries')
<27> self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
<28> self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(binary_path)
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
if getattr(self, 'error', False): return
print(self.error)
self.error = True
+ if not self.cuda_available:
- if self.cuda is None:
+ self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.')
- self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
+ self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so* and follow with step (2b)')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added</s>
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
<s>add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':</s>
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s> make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone https://github.com/TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
|
bitsandbytes.cuda_setup.main/is_cublasLt_compatible
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<4>:<add> CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU! \
<del> CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
<5>:<add> If you run into issues with 8-bit matmul, you can try 4-bit quantization: https://huggingface.co/blog/4bit-transformers-bitsandbytes", is_warning=True)
|
# module: bitsandbytes.cuda_setup.main
def is_cublasLt_compatible(cc):
<0> has_cublaslt = False
<1> if cc is not None:
<2> cc_major, cc_minor = cc.split('.')
<3> if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
<4> CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
<5> else:
<6> has_cublaslt = True
<7> return has_cublaslt
<8>
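The guard above only inspects a "major.minor" capability string. A short illustration of the cutoff it encodes (the capability values are examples, not an exhaustive list):

# Illustration of the cuBLASLt cutoff: compute capability must be >= 7.5.
for cc in ("6.1", "7.0", "7.5", "8.0"):
major, minor = map(int, cc.split("."))
has_cublaslt = not (major < 7 or (major == 7 and minor < 5))
print(cc, has_cublaslt) # 6.1 False, 7.0 False, 7.5 True, 8.0 True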
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
+ def manual_override(self):
+ if torch.cuda.is_available():
+ if 'BNB_CUDA_VERSION' in os.environ:
+ if len(os.environ['BNB_CUDA_VERSION']) > 0:
+ warn((f'\n\n{"="*80}\n'
+ 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
+ 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
+ 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
+ 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
+ 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
+ f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
+ f'\n{"="*80}\n\n'))
+ self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
+
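The override added above only rewrites the suffix of the already-chosen binary name. A minimal sketch of that string manipulation (the file name and version are example values, not a list of shipped binaries):

# Illustrative only: effect of the BNB_CUDA_VERSION suffix swap.
binary_name = "libbitsandbytes_cuda117.so"
override = "122" # value of BNB_CUDA_VERSION
print(binary_name[:-6] + f"{override}.so") # -> libbitsandbytes_cuda122.so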
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
+ binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
- binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
+ self.cuda_available = torch.cuda.is_available()
- self.cuda = cuda
self.cc = cc
self.cuda_version_string = cuda_version_string
+ self.binary_name = binary_name
+ self.manual_override()
package_dir = Path(__file__).parent.parent
+ binary_path = package_dir / self.binary_name
- binary_path = package_dir / binary_name
-
- print('bin', binary_path)
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
legacy_binary_name = "libbitsandbytes_cpu.so"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
+ self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
+ '"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_</s>
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
<s>https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
+ self.add_log_entry('2. CUDA driver not installed')
- self.add_log_entry('1. CUDA driver not installed')
+ self.add_log_entry('3. CUDA not installed')
- self.add_log_entry('2. CUDA not installed')
+ self.add_log_entry('4. You have multiple conflicting CUDA libraries')
- self.add_log_entry('3. You have multiple conflicting CUDA libraries')
+ self.add_log_entry('5. Required library not pre-compiled for this bitsandbytes release!')
- self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(binary_path)
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
if getattr(self, 'error', False): return
print(self.error)
self.error = True
+ if not self.cuda_available:
- if self.cuda is None:
+ self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.')
- self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
+ self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so* and follow with step (2b)')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added</s>
|
bitsandbytes.cuda_setup.main/remove_non_existent_dirs
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<11>:<add> CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
<del> CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
<12>:<add> f"be non-existent: {non_existent_directories}", is_warning=False)
<del> f"be non-existent: {non_existent_directories}", is_warning=True)
|
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
<0> existent_directories: Set[Path] = set()
<1> for path in candidate_paths:
<2> try:
<3> if path.exists():
<4> existent_directories.add(path)
<5> except OSError as exc:
<6> if exc.errno != errno.ENAMETOOLONG:
<7> raise exc
<8>
<9> non_existent_directories: Set[Path] = candidate_paths - existent_directories
<10> if non_existent_directories:
<11> CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
<12> f"be non-existent: {non_existent_directories}", is_warning=True)
<13>
<14> return existent_directories
<15>
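The filter above keeps only directories that actually exist and logs the rest. A small self-contained sketch of the same idea (the paths are invented for the example):

# Minimal sketch of the existence filter; the candidate paths are placeholders.
from pathlib import Path
candidates = {Path("/usr/local/cuda/lib64"), Path("/definitely/not/here")}
existing = {p for p in candidates if p.exists()}
missing = candidates - existing # what the setup reports as non-existent directories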
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
def is_cublasLt_compatible(cc):
has_cublaslt = False
if cc is not None:
cc_major, cc_minor = cc.split('.')
if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
+ CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU! \
- CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
+ If you run into issues with 8-bit matmul, you can try 4-bit quantization: https://huggingface.co/blog/4bit-transformers-bitsandbytes", is_warning=True)
else:
has_cublaslt = True
return has_cublaslt
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
+ def manual_override(self):
+ if torch.cuda.is_available():
+ if 'BNB_CUDA_VERSION' in os.environ:
+ if len(os.environ['BNB_CUDA_VERSION']) > 0:
+ warn((f'\n\n{"="*80}\n'
+ 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
+ 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
+ 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
+ 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
+ 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
+ f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
+ f'\n{"="*80}\n\n'))
+ self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
+
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
+ binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
- binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
+ self.cuda_available = torch.cuda.is_available()
- self.cuda = cuda
self.cc = cc
self.cuda_version_string = cuda_version_string
+ self.binary_name = binary_name
+ self.manual_override()
package_dir = Path(__file__).parent.parent
+ binary_path = package_dir / self.binary_name
- binary_path = package_dir / binary_name
-
- print('bin', binary_path)
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
legacy_binary_name = "libbitsandbytes_cpu.so"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
+ self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
+ '"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_</s>
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
<s>https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
+ self.add_log_entry('2. CUDA driver not installed')
- self.add_log_entry('1. CUDA driver not installed')
+ self.add_log_entry('3. CUDA not installed')
- self.add_log_entry('2. CUDA not installed')
+ self.add_log_entry('4. You have multiple conflicting CUDA libraries')
- self.add_log_entry('3. You have multiple conflicting CUDA libraries')
+ self.add_log_entry('5. Required library not pre-compiled for this bitsandbytes release!')
- self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(binary_path)
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
|
bitsandbytes.cuda_setup.main/warn_in_case_of_duplicates
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<3>:<add> "We select the PyTorch default libcudart.so, which is {torch.version.cuda},"
<add> "but this might missmatch with the CUDA version that is needed for bitsandbytes."
<add> "To override this behavior set the BNB_CUDA_VERSION=<version string, e.g. 122> environmental variable"
<add> "For example, if you want to use the CUDA version 122"
<add> "BNB_CUDA_VERSION=122 python ..."
<add> "OR set the environmental variable in your .bashrc: export BNB_CUDA_VERSION=122"
<add> "In the case of a manual override, make sure you set the LD_LIBRARY_PATH, e.g."
<add> "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2")
<del> "We'll flip a coin and try one of these, in order to fail forward.\n"
<4>:<del> "Either way, this might cause trouble in the future:\n"
<5>:<del> "If you get `CUDA error: invalid device function` errors, the above "
<6>:<del> "might be the cause and the solution is to make sure only one "
<7>:<del> f"{CUDA_RUNTIME_LIBS} in the paths that we search based on your env.")
|
# module: bitsandbytes.cuda_setup.main
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
<0> if len(results_paths) > 1:
<1> warning_msg = (
<2> f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}.. "
<3> "We'll flip a coin and try one of these, in order to fail forward.\n"
<4> "Either way, this might cause trouble in the future:\n"
<5> "If you get `CUDA error: invalid device function` errors, the above "
<6> "might be the cause and the solution is to make sure only one "
<7> f"{CUDA_RUNTIME_LIBS} in the paths that we search based on your env.")
<8> CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
<9>
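The reworded warning steers users toward the BNB_CUDA_VERSION override when several runtimes are found. A hedged sketch of applying that override from Python (the version string "122" is an example; the matching libcudart must still be reachable via LD_LIBRARY_PATH, normally exported in the shell):

# Example of the override the warning recommends; set it before importing bitsandbytes.
import os
os.environ["BNB_CUDA_VERSION"] = "122" # ask for the CUDA 12.2 binary
import bitsandbytes # the setup is expected to read BNB_CUDA_VERSION during import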
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
+ f"be non-existent: {non_existent_directories}", is_warning=False)
- f"be non-existent: {non_existent_directories}", is_warning=True)
return existent_directories
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
def is_cublasLt_compatible(cc):
has_cublaslt = False
if cc is not None:
cc_major, cc_minor = cc.split('.')
if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
+ CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU! \
- CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
+ If you run into issues with 8-bit matmul, you can try 4-bit quantization: https://huggingface.co/blog/4bit-transformers-bitsandbytes", is_warning=True)
else:
has_cublaslt = True
return has_cublaslt
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
+ def manual_override(self):
+ if torch.cuda.is_available():
+ if 'BNB_CUDA_VERSION' in os.environ:
+ if len(os.environ['BNB_CUDA_VERSION']) > 0:
+ warn((f'\n\n{"="*80}\n'
+ 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
+ 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
+ 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
+ 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
+ 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
+ f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
+ f'\n{"="*80}\n\n'))
+ self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
+
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
+ binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
- binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
+ self.cuda_available = torch.cuda.is_available()
- self.cuda = cuda
self.cc = cc
self.cuda_version_string = cuda_version_string
+ self.binary_name = binary_name
+ self.manual_override()
package_dir = Path(__file__).parent.parent
+ binary_path = package_dir / self.binary_name
- binary_path = package_dir / binary_name
-
- print('bin', binary_path)
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
legacy_binary_name = "libbitsandbytes_cpu.so"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
+ self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
+ '"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_</s>
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
<s>https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
+ self.add_log_entry('2. CUDA driver not installed')
- self.add_log_entry('1. CUDA driver not installed')
+ self.add_log_entry('3. CUDA not installed')
- self.add_log_entry('2. CUDA not installed')
+ self.add_log_entry('4. You have multiple conflicting CUDA libraries')
- self.add_log_entry('3. You have multiple conflicting CUDA libraries')
+ self.add_log_entry('5. Required library not pre-compiled for this bitsandbytes release!')
- self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(binary_path)
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
|
bitsandbytes.cuda_setup.main/determine_cuda_runtime_lib_path
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<13>:<add> cuda_runtime_libs = set()
<20>:<add> cuda_runtime_libs.update(conda_cuda_libs)
<del> return next(iter(conda_cuda_libs))
<29>:<add> cuda_runtime_libs.update(lib_ld_cuda_libs)
<del> return next(iter(lib_ld_cuda_libs))
|
# module: bitsandbytes.cuda_setup.main
def determine_cuda_runtime_lib_path() -> Union[Path, None]:
<0> """
<1> Searches for a cuda installations, in the following order of priority:
<2> 1. active conda env
<3> 2. LD_LIBRARY_PATH
<4> 3. any other env vars, while ignoring those that
<5> - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
<6> - don't contain the path separator `/`
<7>
<8> If multiple libraries are found in part 3, we optimistically try one,
<9> while giving a warning message.
<10> """
<11> candidate_env_vars = get_potentially_lib_path_containing_env_vars()
<12>
<13> if "CONDA_PREFIX" in candidate_env_vars:
<14> conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"
<15>
<16> conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
<17> warn_in_case_of_duplicates(conda_cuda_libs)
<18>
<19> if conda_cuda_libs:
<20> return next(iter(conda_cuda_libs))
<21>
<22> CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
<23> f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
<24>
<25> if "LD_LIBRARY_PATH" in candidate_env_vars:
<26> lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])
<27>
<28> if lib_ld_cuda_libs:
<29> return next(iter(lib_ld_cuda_libs))
<30> warn_in_case_of_duplicates(lib_ld_cuda_libs)
<31>
<32> CUDASetup.get_instance().add</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
def determine_cuda_runtime_lib_path() -> Union[Path, None]:
# offset: 1
f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
remaining_candidate_env_vars = {
env_var: value for env_var, value in candidate_env_vars.items()
if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
}
cuda_runtime_libs = set()
for env_var, value in remaining_candidate_env_vars.items():
cuda_runtime_libs.update(find_cuda_lib_in(value))
if len(cuda_runtime_libs) == 0:
CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching in backup paths...')
cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64'))
warn_in_case_of_duplicates(cuda_runtime_libs)
return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
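After this change the search no longer returns on the first hit; it accumulates candidates and warns if more than one libcudart.so survives. A compressed sketch of that flow, which skips the "other env vars" pass and uses a glob as a stand-in for find_cuda_lib_in (both are assumptions for illustration):

# Condensed sketch of the accumulate-then-warn search, not the full implementation.
import os
from pathlib import Path

def _find(path_str):
# stand-in for find_cuda_lib_in: glob libcudart.so* under each directory
found = set()
for d in filter(None, path_str.split(os.pathsep)):
p = Path(d)
if p.is_dir():
found.update(p.glob("libcudart.so*"))
return found

candidates = set()
if "CONDA_PREFIX" in os.environ:
candidates |= _find(str(Path(os.environ["CONDA_PREFIX"]) / "lib"))
candidates |= _find(os.environ.get("LD_LIBRARY_PATH", ""))
if not candidates: # backup path, mirroring the fallback above
candidates |= _find("/usr/local/cuda/lib64")
if len(candidates) > 1:
print("warning: multiple libcudart.so candidates:", candidates)
runtime_lib = next(iter(candidates)) if candidates else None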
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
+ f"be non-existent: {non_existent_directories}", is_warning=False)
- f"be non-existent: {non_existent_directories}", is_warning=True)
return existent_directories
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
def is_cublasLt_compatible(cc):
has_cublaslt = False
if cc is not None:
cc_major, cc_minor = cc.split('.')
if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
+ CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU! \
- CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
+ If you run into issues with 8-bit matmul, you can try 4-bit quantization: https://huggingface.co/blog/4bit-transformers-bitsandbytes", is_warning=True)
else:
has_cublaslt = True
return has_cublaslt
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
if len(results_paths) > 1:
warning_msg = (
f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}.. "
+ "We select the PyTorch default libcudart.so, which is {torch.version.cuda},"
+ "but this might missmatch with the CUDA version that is needed for bitsandbytes."
+ "To override this behavior set the BNB_CUDA_VERSION=<version string, e.g. 122> environmental variable"
+ "For example, if you want to use the CUDA version 122"
+ "BNB_CUDA_VERSION=122 python ..."
+ "OR set the environmental variable in your .bashrc: export BNB_CUDA_VERSION=122"
+ "In the case of a manual override, make sure you set the LD_LIBRARY_PATH, e.g."
+ "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2")
- "We'll flip a coin and try one of these, in order to fail forward.\n"
- "Either way, this might cause trouble in the future:\n"
- "If you get `CUDA error: invalid device function` errors, the above "
- "might be the cause and the solution is to make sure only one "
- f"{CUDA_RUNTIME_LIBS} in the paths that we search based on your env.")
CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
+ def manual_override(self):
+ if torch.cuda.is_available():
+ if 'BNB_CUDA_VERSION' in os.environ:
+ if len(os.environ['BNB_CUDA_VERSION']) > 0:
+ warn((f'\n\n{"="*80}\n'
+ 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
+ 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
+ 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
+ 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
+ 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
+ f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
+ f'\n{"="*80}\n\n'))
+ self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
+
|
bitsandbytes.cuda_setup.main/get_cuda_version
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<0>:<del> if cuda is None: return None
<1>:<del>
<2>:<del> try:
<3>:<del> cudart = ct.CDLL(cudart_path)
<4>:<del> except OSError:
<5>:<del> CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
<6>:<del> return None
<7>:<del>
<8>:<del> version = ct.c_int()
<9>:<del> try:
<10>:<del> check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version)))
<11>:<del> except AttributeError as e:
<12>:<del> CUDASetup.get_instance().add_log_entry(f'ERROR: {str(e)}')
<13>:<del> CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: libcudart.so path is {cudart_path}')
<14>:<del> CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: Is seems that your cuda installation is not in your path. See https://github.com/TimDettmers/bitsandbytes/issues/85 for more information.')
<15>:<del> version = int(version.value)
<16>:<del> major = version//1000
<17>:<del> minor = (version-(major*1000))//10
<18>:<add> major, minor = map(int, torch.version.cuda.split("."))
|
# module: bitsandbytes.cuda_setup.main
# https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
+ def get_cuda_version():
- def get_cuda_version(cuda, cudart_path):
<0> if cuda is None: return None
<1>
<2> try:
<3> cudart = ct.CDLL(cudart_path)
<4> except OSError:
<5> CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
<6> return None
<7>
<8> version = ct.c_int()
<9> try:
<10> check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version)))
<11> except AttributeError as e:
<12> CUDASetup.get_instance().add_log_entry(f'ERROR: {str(e)}')
<13> CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: libcudart.so path is {cudart_path}')
<14> CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: Is seems that your cuda installation is not in your path. See https://github.com/TimDettmers/bitsandbytes/issues/85 for more information.')
<15> version = int(version.value)
<16> major = version//1000
<17> minor = (version-(major*1000))//10
<18>
<19> if major < 11:
<20> CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!')
<21>
<22> return f'{major}{minor}'
<23>
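With this change the version string comes from the PyTorch build rather than from dlopen-ing libcudart. A minimal sketch of the same parsing, assuming a CUDA-enabled PyTorch build where torch.version.cuda is e.g. "11.8":

# Sketch of the torch-based CUDA version parsing.
import torch
if torch.version.cuda is not None: # None on CPU-only builds
major, minor = map(int, torch.version.cuda.split("."))
print(f"{major}{minor}") # e.g. "11.8" -> "118"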
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
- def check_cuda_result(cuda, result_val):
- # 3. Check for CUDA errors
- if result_val != 0:
- error_str = ct.c_char_p()
- cuda.cuGetErrorString(result_val, ct.byref(error_str))
- if error_str.value is not None:
- CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
- else:
- CUDASetup.get_instance().add_log_entry(f"Unknown CUDA exception! Please check your CUDA install. It might also be that your GPU is too old.")
-
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
+ f"be non-existent: {non_existent_directories}", is_warning=False)
- f"be non-existent: {non_existent_directories}", is_warning=True)
return existent_directories
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
def is_cublasLt_compatible(cc):
has_cublaslt = False
if cc is not None:
cc_major, cc_minor = cc.split('.')
if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
+ CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU! \
- CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
+ If you run into issues with 8-bit matmul, you can try 4-bit quantization: https://huggingface.co/blog/4bit-transformers-bitsandbytes", is_warning=True)
else:
has_cublaslt = True
return has_cublaslt
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
if len(results_paths) > 1:
warning_msg = (
f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}.. "
+ "We select the PyTorch default libcudart.so, which is {torch.version.cuda},"
+ "but this might missmatch with the CUDA version that is needed for bitsandbytes."
+ "To override this behavior set the BNB_CUDA_VERSION=<version string, e.g. 122> environmental variable"
+ "For example, if you want to use the CUDA version 122"
+ "BNB_CUDA_VERSION=122 python ..."
+ "OR set the environmental variable in your .bashrc: export BNB_CUDA_VERSION=122"
+ "In the case of a manual override, make sure you set the LD_LIBRARY_PATH, e.g."
+ "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2")
- "We'll flip a coin and try one of these, in order to fail forward.\n"
- "Either way, this might cause trouble in the future:\n"
- "If you get `CUDA error: invalid device function` errors, the above "
- "might be the cause and the solution is to make sure only one "
- f"{CUDA_RUNTIME_LIBS} in the paths that we search based on your env.")
CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
+ def manual_override(self):
+ if torch.cuda.is_available():
+ if 'BNB_CUDA_VERSION' in os.environ:
+ if len(os.environ['BNB_CUDA_VERSION']) > 0:
+ warn((f'\n\n{"="*80}\n'
+ 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
+ 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
+ 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
+ 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
+ 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
+ f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
+ f'\n{"="*80}\n\n'))
+ self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
+
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
def determine_cuda_runtime_lib_path() -> Union[Path, None]:
"""
Searches for a cuda installations, in the following order of priority:
1. active conda env
2. LD_LIBRARY_PATH
3. any other env vars, while ignoring those that
- are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
- don't contain the path separator `/`
If multiple libraries are found in part 3, we optimistically try one,
while giving a warning message.
"""
candidate_env_vars = get_potentially_lib_path_containing_env_vars()
+ cuda_runtime_libs = set()
if "CONDA_PREFIX" in candidate_env_vars:
conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"
conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
warn_in_case_of_duplicates(conda_cuda_libs)
if conda_cuda_libs:
+ cuda_runtime_libs.update(conda_cuda_libs)
- return next(iter(conda_cuda_libs))
CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
if "LD_LIBRARY_PATH" in candidate_env_vars:
lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])
if lib_ld_cuda_libs:
+ cuda_runtime_libs.update(lib_ld_cuda_libs)
- return next(iter(lib_ld_cuda_libs))
warn_in_case_of_duplicates(lib_</s>
|
bitsandbytes.cuda_setup.main/get_compute_capabilities
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<0>:<del> """
<1>:<del> 1. find libcuda.so library (GPU driver) (/usr/lib)
<2>:<del> init_device -> init variables -> call function by reference
<3>:<del> 2. call extern C function to determine CC
<4>:<del> (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
<5>:<del> 3. Check for CUDA errors
<6>:<del> https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
<7>:<del> # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
<8>:<del> """
<9>:<del>
<10>:<del> nGpus = ct.c_int()
<11>:<del> cc_major = ct.c_int()
<12>:<del> cc_minor = ct.c_int()
<13>:<del>
<14>:<del> device = ct.c_int()
<15>:<del>
<16>:<del> check_cuda_result(cuda, cuda.cuDeviceGetCount(ct.byref(nGpus)))
<18>:<del> for i in range(nGpus.value):
<19>:<del> check_cuda_result(cuda, cuda.cuDeviceGet(ct.byref(device), i))
<20>:<del> ref_major = ct.byref(cc_major)
<21>:<del> ref_minor = ct.byref(cc_minor)
<22>:<del> # 2. call extern C function to determine CC
<23>:<del> check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device))
<24>:<add> for i in range(torch.cuda.device_count()):
<add> cc_major, cc_minor = torch.cuda.get_device_capability(torch.cuda.device(i))
<add> ccs.append(f"{cc_major}.{cc_minor}")
<del> ccs.append(f"{cc_major.value}.{cc_minor.value}")
|
# module: bitsandbytes.cuda_setup.main
+ def get_compute_capabilities():
- def get_compute_capabilities(cuda):
<0> """
<1> 1. find libcuda.so library (GPU driver) (/usr/lib)
<2> init_device -> init variables -> call function by reference
<3> 2. call extern C function to determine CC
<4> (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
<5> 3. Check for CUDA errors
<6> https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
<7> # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
<8> """
<9>
<10> nGpus = ct.c_int()
<11> cc_major = ct.c_int()
<12> cc_minor = ct.c_int()
<13>
<14> device = ct.c_int()
<15>
<16> check_cuda_result(cuda, cuda.cuDeviceGetCount(ct.byref(nGpus)))
<17> ccs = []
<18> for i in range(nGpus.value):
<19> check_cuda_result(cuda, cuda.cuDeviceGet(ct.byref(device), i))
<20> ref_major = ct.byref(cc_major)
<21> ref_minor = ct.byref(cc_minor)
<22> # 2. call extern C function to determine CC
<23> check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device))
<24> ccs.append(f"{cc_major.value}.{cc_minor.value}")
<25>
<26> return ccs
<27>
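The rewritten helper leans entirely on PyTorch instead of ctypes calls into libcuda. A standalone sketch of the same enumeration (assumes at least one visible GPU):

# Sketch: collect "major.minor" strings for all visible GPUs via PyTorch.
import torch
ccs = []
for i in range(torch.cuda.device_count()):
major, minor = torch.cuda.get_device_capability(i)
ccs.append(f"{major}.{minor}")
ccs.sort() # evaluate_cuda_setup later takes ccs[-1], the highest capability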
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
- def get_cuda_lib_handle():
- # 1. find libcuda.so library (GPU driver) (/usr/lib)
- try:
- cuda = ct.CDLL("libcuda.so")
- except OSError:
- CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
- return None
- check_cuda_result(cuda, cuda.cuInit(0))
-
- return cuda
-
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
- def check_cuda_result(cuda, result_val):
- # 3. Check for CUDA errors
- if result_val != 0:
- error_str = ct.c_char_p()
- cuda.cuGetErrorString(result_val, ct.byref(error_str))
- if error_str.value is not None:
- CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
- else:
- CUDASetup.get_instance().add_log_entry(f"Unknown CUDA exception! Please check your CUDA install. It might also be that your GPU is too old.")
-
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
+ f"be non-existent: {non_existent_directories}", is_warning=False)
- f"be non-existent: {non_existent_directories}", is_warning=True)
return existent_directories
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
def is_cublasLt_compatible(cc):
has_cublaslt = False
if cc is not None:
cc_major, cc_minor = cc.split('.')
if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
+ CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU! \
- CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
+ If you run into issues with 8-bit matmul, you can try 4-bit quantization: https://huggingface.co/blog/4bit-transformers-bitsandbytes", is_warning=True)
else:
has_cublaslt = True
return has_cublaslt
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
# https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
+ def get_cuda_version():
- def get_cuda_version(cuda, cudart_path):
- if cuda is None: return None
-
- try:
- cudart = ct.CDLL(cudart_path)
- except OSError:
- CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
- return None
-
- version = ct.c_int()
- try:
- check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version)))
- except AttributeError as e:
- CUDASetup.get_instance().add_log_entry(f'ERROR: {str(e)}')
- CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: libcudart.so path is {cudart_path}')
- CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: Is seems that your cuda installation is not in your path. See https://github.com/TimDettmers/bitsandbytes/issues/85 for more information.')
- version = int(version.value)
- major = version//1000
- minor = (version-(major*1000))//10
+ major, minor = map(int, torch.version.cuda.split("."))
if major < 11:
CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!')
return f'{major}{minor}'
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
if len(results_paths) > 1:
warning_msg = (
f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}.. "
+ "We select the PyTorch default libcudart.so, which is {torch.version.cuda},"
+ "but this might missmatch with the CUDA version that is needed for bitsandbytes."
+ "To override this behavior set the BNB_CUDA_VERSION=<version string, e.g. 122> environmental variable"
+ "For example, if you want to use the CUDA version 122"
+ "BNB_CUDA_VERSION=122 python ..."
+ "OR set the environmental variable in your .bashrc: export BNB_CUDA_VERSION=122"
+ "In the case of a manual override, make sure you set the LD_LIBRARY_PATH, e.g."
+ "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2")
- "We'll flip a coin and try one of these, in order to fail forward.\n"
- "Either way, this might cause trouble in the future:\n"
- "If you get `CUDA error: invalid device function` errors, the above "
- "might be the cause and the solution is to make sure only one "
- f"{CUDA_RUNTIME_LIBS} in the paths that we search based on your env.")
CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
|
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<0>:<add> cuda_setup = CUDASetup.get_instance()
<1>:<add> cuda_setup.add_log_entry('')
<del> print('')
<2>:<add> cuda_setup.add_log_entry('='*35 + 'BUG REPORT' + '='*35)
<del> print('='*35 + 'BUG REPORT' + '='*35)
<3>:<add> cuda_setup.add_log_entry(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
<del> print(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
<5>:<add> cuda_setup.add_log_entry('='*80)
<del> print('='*80)
<6>:<add> if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None
<del> if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None, None
<8>:<del> cuda_setup = CUDASetup.get_instance()
<10>:<del> cuda = get_cuda_lib_handle()
<11>:<add> ccs = get_compute_capabilities()
<del> cc = get_compute_capability(cuda)
<12>:<add> ccs.sort()
<add> cc = ccs[-1] # we take the highest capability
<add> cuda_version_string = get_cuda_version()
<del> cuda_version_string = get_cuda_version(cuda, cudart_path)
<14>:<del> failure = False
<15>:<del> if cudart_path is None:
<16>:<del> failure = True
<17>:<del> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True)
<18>:<del> else:
<19>:<del> cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
<20>:<add> cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found
|
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
<0> if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
<1> print('')
<2> print('='*35 + 'BUG REPORT' + '='*35)
<3> print(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
<4> ('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
<5> print('='*80)
<6> if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None, None
<7>
<8> cuda_setup = CUDASetup.get_instance()
<9> cudart_path = determine_cuda_runtime_lib_path()
<10> cuda = get_cuda_lib_handle()
<11> cc = get_compute_capability(cuda)
<12> cuda_version_string = get_cuda_version(cuda, cudart_path)
<13>
<14> failure = False
<15> if cudart_path is None:
<16> failure = True
<17> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True)
<18> else:
<19> cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
<20>
<21> if cc == '' or cc is None:
<22> failure = True
<23> cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library...", is_warning=True)
<24> else:
<25> cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
<26>
<27> if cuda is None:
<28> </s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
# offset: 1
else:
cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
# 7.5 is the minimum CC vor cublaslt
has_cublaslt = is_cublasLt_compatible(cc)
# TODO:
# (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
# (2) Multiple CUDA versions installed
# we use ls -l instead of nvcc to determine the cuda version
# since most installations will have the libcudart.so installed, but not the compiler
if failure:
binary_name = "libbitsandbytes_cpu.so"
elif has_cublaslt:
binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so"
else:
"if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"
return binary_name, cudart_path, cuda, cc, cuda_version_string
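The tail of the function reduces to a three-way choice of shared-library name. A small sketch of just that decision with example inputs (the values of the flags and version string are assumptions for illustration):

# Sketch of the binary-name decision with example inputs.
cuda_available = True
has_cublaslt = False # e.g. compute capability 6.1
cuda_version_string = "118" # as produced by get_cuda_version()
if not cuda_available:
binary_name = "libbitsandbytes_cpu.so"
elif has_cublaslt:
binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so"
else:
binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"
print(binary_name) # libbitsandbytes_cuda118_nocublaslt.so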
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
- # def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error
- def get_compute_capability(cuda):
- """
- Extracts the highest compute capbility from all available GPUs, as compute
- capabilities are downwards compatible. If no GPUs are detected, it returns
- None.
- """
- if cuda is None: return None
-
- # TODO: handle different compute capabilities; for now, take the max
- ccs = get_compute_capabilities(cuda)
- if ccs: return ccs[-1]
-
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
- def get_cuda_lib_handle():
- # 1. find libcuda.so library (GPU driver) (/usr/lib)
- try:
- cuda = ct.CDLL("libcuda.so")
- except OSError:
- CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
- return None
- check_cuda_result(cuda, cuda.cuInit(0))
-
- return cuda
-
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
- def check_cuda_result(cuda, result_val):
- # 3. Check for CUDA errors
- if result_val != 0:
- error_str = ct.c_char_p()
- cuda.cuGetErrorString(result_val, ct.byref(error_str))
- if error_str.value is not None:
- CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
- else:
- CUDASetup.get_instance().add_log_entry(f"Unknown CUDA exception! Please check your CUDA install. It might also be that your GPU is too old.")
-
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
+ f"be non-existent: {non_existent_directories}", is_warning=False)
- f"be non-existent: {non_existent_directories}", is_warning=True)
return existent_directories
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
def is_cublasLt_compatible(cc):
has_cublaslt = False
if cc is not None:
cc_major, cc_minor = cc.split('.')
if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
+ CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU! \
- CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
+ If you run into issues with 8-bit matmul, you can try 4-bit quantization: https://huggingface.co/blog/4bit-transformers-bitsandbytes", is_warning=True)
else:
has_cublaslt = True
return has_cublaslt
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
# https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
+ def get_cuda_version():
- def get_cuda_version(cuda, cudart_path):
- if cuda is None: return None
-
- try:
- cudart = ct.CDLL(cudart_path)
- except OSError:
- CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
- return None
-
- version = ct.c_int()
- try:
- check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version)))
- except AttributeError as e:
- CUDASetup.get_instance().add_log_entry(f'ERROR: {str(e)}')
- CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: libcudart.so path is {cudart_path}')
- CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: Is seems that your cuda installation is not in your path. See https://github.com/TimDettmers/bitsandbytes/issues/85 for more information.')
- version = int(version.value)
- major = version//1000
- minor = (version-(major*1000))//10
+ major, minor = map(int, torch.version.cuda.split("."))
if major < 11:
CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!')
return f'{major}{minor}'
|
bitsandbytes.autograd._functions/MatMul8bitLt.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<29>:<add> A = A.reshape(-1, A.shape[-1])
<del> A = A.view(-1, A.shape[-1]).contiguous()
|
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
<0> using_igemmlt = supports_igemmlt(A.device) and not state.force_no_igemmlt
<1> # default of pytorch behavior if inputs are empty
<2> ctx.is_empty = False
<3> if prod(A.shape) == 0:
<4> ctx.is_empty = True
<5> ctx.A = A
<6> ctx.B = B
<7> ctx.bias = bias
<8> if A.shape[-1] == B.shape[0]:
<9> return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)
<10> else:
<11> return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)
<12>
<13> # 1. Quantize A
<14> # 2. Quantize B
<15> # 3. Matmul
<16> # 4. Mixed-precision decomposition matmul
<17> # 5. Save state
<18> formatB = state.formatB
<19> input_shape = A.shape
<20> if state.outlier_pool is None:
<21> state.outlier_pool = GlobalOutlierPooler.get_instance()
<22>
<23> # Cast A to fp16
<24> if A.dtype != torch.float16:
<25> warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")
<26>
<27> # 1. Quantize A
<28> if len(A.shape) == 3:
<29> A = A.view(-1, A.shape[-1]).contiguous()
<30> CA,</s>
|
===========below chunk 0===========
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
# offset: 1
if state.threshold > 0.0 and coo_tensorA is not None:
if state.has_fp16_weights:
idx = torch.unique(coo_tensorA.colidx).long()
CA[:, idx] = 0
CAt[:, idx] = 0
subA = A[:, idx]
state.subB = B[:, idx].t().contiguous()
state.idx = idx
else:
if state.CxB is None and using_igemmlt:
# B in in 8-bit row-major, we can transform it back to 16-bit to extract outlier dimensions
# we also need to convert it to the turing/ampere format
state.CxB, state.SB = F.transform(state.CB, to_order=formatB)
else:
if not state.has_fp16_weights and state.CxB is None and using_igemmlt:
state.CxB, state.SB = F.transform(state.CB, to_order=formatB)
subA = None
# 2. Quantize B
if state.has_fp16_weights:
has_grad = True if (getattr(B, "grad", None) is not None) else False
is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)
if is_transposed:
B = B.contiguous()
if (state.is_training and not has_grad) or state.CxB is None:
state.reset_grads()
(
CB,
state.CBt,
</s>
===========below chunk 1===========
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
# offset: 2
<s>.CxB is None:
state.reset_grads()
(
CB,
state.CBt,
state.SCB,
state.SCBt,
coo_tensorB,
) = F.double_quant(B.to(torch.float16))
if using_igemmlt:
state.CxB, state.SB = F.transform(CB, to_order=formatB)
else:
state.CB = CB
else:
has_grad = False
if coo_tensorA is not None and not state.has_fp16_weights:
# extract outliers
outlier_idx = torch.unique(coo_tensorA.colidx)
state.idx = outlier_idx
# state.outlier_pool.add_outliers(outlier_idx, A.shape[-1])
# if state.use_pool and state.outlier_pool.model_dim == A.shape[-1]:
# # do not use pool for 2nd FFN layer
# state.idx = state.outlier_pool.get_current_outlier_idx().to(A.device)
# else:
# state.idx = outlier_idx
if state.CxB is not None:
outliers = F.extract_outliers(state.CxB, state.SB, state.idx.int())
else:
outliers = state.CB[:, state.idx.long()].clone()
</s>
===========below chunk 2===========
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
# offset: 3
<s>subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype)
CA[:, state.idx.long()] = 0
CAt[:, state.idx.long()] = 0
subA = A[:, state.idx.long()]
shapeB = state.SB[0] if state.SB else B.shape
if len(input_shape) == 3:
output_shape = (input_shape[0], input_shape[1], shapeB[0])
else:
output_shape = (input_shape[0], shapeB[0])
# 3. Matmul
if using_igemmlt:
C32A, SA = F.transform(CA, "col32")
out32, Sout32 = F.igemmlt(C32A, state.CxB, SA, state.SB)
if bias is None or bias.dtype == torch.float16:
# we apply the fused bias here
output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=bias)
output = output.to(A.dtype)
else: # apply bias separately
output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=None)
output = output.to(A.dtype).add_(bias)
else:
A_wo_outliers = A.clone</s>
===========below chunk 3===========
# module: bitsandbytes.autograd._functions
class MatMul8bitLt(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):
# offset: 4
<s> if state.idx is not None:
A_wo_outliers[:, state.idx.long()] = 0
output = torch.nn.functional.linear(A_wo_outliers, state.CB.to(A.dtype))
output = output.mul_(state.SCB.unsqueeze(0).mul(1.0 / 127.0))
if bias is not None:
output = output.add_(bias)
# 4. Mixed-precision decomposition matmul
if coo_tensorA is not None and subA is not None:
output += torch.matmul(subA, state.subB)
# 5. Save state
ctx.state = state
ctx.formatB = formatB
ctx.grad_shape = input_shape
ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
if any(ctx.needs_input_grad[:2]):
ctx.tensors = (CAt, subA, A)
ctx.tensor_states = (SCAt, state.idx)
else:
ctx.tensors = [None, None, A]
ctx.tensor_states = (None, None)
ctx.save_for_backward(None, None)
clone_func = torch.clone if len(output_shape) == 3 else lambda x: x
return clone_func(output.view(output_shape))
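One of the changes above swaps view(-1, A.shape[-1]).contiguous() for reshape(-1, A.shape[-1]) when flattening a 3D activation: reshape behaves like view on contiguous inputs and falls back to a copy otherwise, so non-contiguous activations no longer raise. A tiny repro of the difference (shapes are arbitrary):

# Why reshape is the safer flatten: view() requires compatible (contiguous) memory.
import torch
A = torch.randn(2, 3, 4).transpose(0, 1) # non-contiguous 3D tensor
try:
A.view(-1, A.shape[-1])
except RuntimeError as e:
print("view failed:", e)
A2 = A.reshape(-1, A.shape[-1]) # works; copies only if it must
print(A2.shape) # torch.Size([6, 4])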
|
bitsandbytes.functional/quantize_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<36>:<add> absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
<del> absmax = torch.zeros((blocks,), device=A.device)
|
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
<0> """
<1> Quantize tensor A in blocks of size 4096 values.
<2>
<3> Quantizes tensor A by dividing it into blocks of 4096 values.
<4> Then the absolute maximum value within these blocks is calculated
<5> for the non-linear quantization.
<6>
<7> Parameters
<8> ----------
<9> A : torch.Tensor
<10> The input tensor.
<11> code : torch.Tensor
<12> The quantization map.
<13> absmax : torch.Tensor
<14> The absmax values.
<15> out : torch.Tensor
<16> The output tensor (8-bit).
<17>
<18> Returns
<19> -------
<20> torch.Tensor:
<21> The 8-bit tensor.
<22> tuple(torch.Tensor, torch.Tensor):
<23> The quantization state to undo the quantization.
<24> """
<25>
<26>
<27> if code is None:
<28> if "dynamic" not in name2qmap:
<29> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<30> code = name2qmap["dynamic"]
<31>
<32> if absmax is None:
<33> n = A.numel()
<34> blocks = n // blocksize
<35> blocks += 1 if n % blocksize > 0 else 0
<36> absmax = torch.zeros((blocks,), device=A.device)
<37>
<38> if out is None:
<39> out = torch.zeros_like(A, dtype=torch.uint8)
<40>
<41> if A.device.type != 'cpu':
<42> assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
<43> cblocksize = ct.c_int32(blocksize)
<44> prev_device = pre_call(A.device)
<45> code = code.to(A.device)
<46> is_on_gpu([code, A, out, absmax])
<47> if A.</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
# offset: 1
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.bfloat16:
lib.cquantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
if nested:
offset = absmax.mean()
absmax -= offset
qabsmax, state2 = quantize_blockwise(absmax, blocksize=blocksize, nested=False)
state = [qabsmax, code, blocksize, nested, A.dtype, offset, state2]
else:
state = [absmax, code, blocksize, nested, A.dtype, None, None]
return out, state
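For reference, a minimal round trip through this API (a sketch; it assumes a CUDA build of bitsandbytes, since the fp16/bf16 branches above dispatch to GPU kernels):

import torch
import bitsandbytes.functional as F

A = torch.randn(4096, 4096, dtype=torch.float16, device='cuda')
q, state = F.quantize_blockwise(A, blocksize=4096)   # uint8 codes + (absmax, code, ...) state
A_hat = F.dequantize_blockwise(q, state)             # back to fp16 (the dtype is stored in the state)
print((A - A_hat).abs().mean())                      # blockwise 8-bit quantization error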
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
- # def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error
- def get_compute_capability(cuda):
- """
- Extracts the highest compute capbility from all available GPUs, as compute
- capabilities are downwards compatible. If no GPUs are detected, it returns
- None.
- """
- if cuda is None: return None
-
- # TODO: handle different compute capabilities; for now, take the max
- ccs = get_compute_capabilities(cuda)
- if ccs: return ccs[-1]
-
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
- def get_cuda_lib_handle():
- # 1. find libcuda.so library (GPU driver) (/usr/lib)
- try:
- cuda = ct.CDLL("libcuda.so")
- except OSError:
- CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
- return None
- check_cuda_result(cuda, cuda.cuInit(0))
-
- return cuda
-
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
- def check_cuda_result(cuda, result_val):
- # 3. Check for CUDA errors
- if result_val != 0:
- error_str = ct.c_char_p()
- cuda.cuGetErrorString(result_val, ct.byref(error_str))
- if error_str.value is not None:
- CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
- else:
- CUDASetup.get_instance().add_log_entry(f"Unknown CUDA exception! Please check your CUDA install. It might also be that your GPU is too old.")
-
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
+ f"be non-existent: {non_existent_directories}", is_warning=False)
- f"be non-existent: {non_existent_directories}", is_warning=True)
return existent_directories
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
def is_cublasLt_compatible(cc):
has_cublaslt = False
if cc is not None:
cc_major, cc_minor = cc.split('.')
if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
+ CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU! \
- CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
+ If you run into issues with 8-bit matmul, you can try 4-bit quantization: https://huggingface.co/blog/4bit-transformers-bitsandbytes", is_warning=True)
else:
has_cublaslt = True
return has_cublaslt
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
+ def manual_override(self):
+ if torch.cuda.is_available():
+ if 'BNB_CUDA_VERSION' in os.environ:
+ if len(os.environ['BNB_CUDA_VERSION']) > 0:
+ warn((f'\n\n{"="*80}\n'
+ 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
+ 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
+ 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
+ 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
+ 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
+ f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
+ f'\n{"="*80}\n\n'))
+ self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
+
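The manual override shown above only rewrites the CUDA-version suffix of the library file name; the string manipulation in isolation (the file name and version here are hypothetical examples):

import os

binary_name = 'libbitsandbytes_cuda117.so'            # hypothetical auto-detected name
os.environ['BNB_CUDA_VERSION'] = '118'                # user-requested override
# drop the trailing '117.so' (6 characters) and splice in the requested version
binary_name = binary_name[:-6] + f"{os.environ['BNB_CUDA_VERSION']}.so"
print(binary_name)                                    # libbitsandbytes_cuda118.so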
|
bitsandbytes.functional/dequantize_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<35>:<add>
<38>:<add> if absmax.dtype != torch.float32: absmax = absmax.float()
|
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
<0> """
<1> Dequantizes blockwise quantized values.
<2>
<3> Dequantizes the tensor A with maximum absolute values absmax in
<4> blocks of size 4096.
<5>
<6> Parameters
<7> ----------
<8> A : torch.Tensor
<9> The input 8-bit tensor.
<10> quant_state : tuple(torch.Tensor, torch.Tensor)
<11> Tuple of code and absmax values.
<12> absmax : torch.Tensor
<13> The absmax values.
<14> code : torch.Tensor
<15> The quantization map.
<16> out : torch.Tensor
<17> Dequantized output tensor (default: float32)
<18>
<19>
<20> Returns
<21> -------
<22> torch.Tensor:
<23> Dequantized tensor (default: float32)
<24> """
<25> assert quant_state is not None or absmax is not None
<26> if code is None and quant_state is None:
<27> if "dynamic" not in name2qmap:
<28> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<29> code = name2qmap["dynamic"]
<30>
<31> if quant_state is None:
<32> quant_state = (absmax, code, blocksize, False, torch.float32, None, None)
<33>
<34> absmax, code, blocksize, nested, dtype, offset, state2 = quant_state
<35> if nested:
<36> absmax = dequantize_blockwise(absmax, state2)
<37> absmax += offset
<38>
<39> if out is None:
<40> out = torch.empty(A.shape, dtype=dtype, device=A.device)
<41>
<42> if A.device.type != 'cpu':
<43> device = pre_call(A.device)
<44> code =</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 1
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.bfloat16:
lib.cdequantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 2
<s>), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
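The nested branch above is the decoding half of the double quantization applied to the per-block statistics: quantize_blockwise(..., nested=True) stores the absmax values themselves as 8-bit codes plus their mean as an offset, and this function undoes that before decoding A. A sketch of the round trip, following the state layout shown above (CUDA assumed):

import torch
import bitsandbytes.functional as F

A = torch.randn(1 << 16, dtype=torch.float16, device='cuda')
q, state = F.quantize_blockwise(A, blocksize=64, nested=True)
qabsmax, code, blocksize, nested, dtype, offset, state2 = state
absmax = F.dequantize_blockwise(qabsmax, state2) + offset   # what the nested branch reconstructs
A_hat = F.dequantize_blockwise(q, state)                    # full decode uses the same path internally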
===========changed ref 0===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
"""
Quantize tensor A in blocks of size 4096 values.
Quantizes tensor A by dividing it into blocks of 4096 values.
Then the absolute maximum value within these blocks is calculated
for the non-linear quantization.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
absmax : torch.Tensor
The absmax values.
out : torch.Tensor
The output tensor (8-bit).
Returns
-------
torch.Tensor:
The 8-bit tensor.
tuple(torch.Tensor, torch.Tensor):
The quantization state to undo the quantization.
"""
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if absmax is None:
n = A.numel()
blocks = n // blocksize
blocks += 1 if n % blocksize > 0 else 0
+ absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
- absmax = torch.zeros((blocks,), device=A.device)
if out is None:
out = torch.zeros_like(A, dtype=torch.uint8)
if A.device.type != 'cpu':
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
cblocksize = ct.c_int32(blocksize)
prev_device = pre_call(A.device)
code = code.to(A.device)
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32</s>
===========changed ref 1===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
# offset: 1
<s> out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.bfloat16:
lib.cquantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
if nested:
offset = absmax.mean()
absmax -= offset
qabsmax, state2 = quantize_blockwise(absmax, blocksize=blocksize, nested=False)
state = [qabsmax, code, blocksize, nested, A.dtype, offset, state2]
else:
state = [absmax, code, blocksize, nested, A
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
- # def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error
- def get_compute_capability(cuda):
- """
- Extracts the highest compute capbility from all available GPUs, as compute
- capabilities are downwards compatible. If no GPUs are detected, it returns
- None.
- """
- if cuda is None: return None
-
- # TODO: handle different compute capabilities; for now, take the max
- ccs = get_compute_capabilities(cuda)
- if ccs: return ccs[-1]
-
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
- def get_cuda_lib_handle():
- # 1. find libcuda.so library (GPU driver) (/usr/lib)
- try:
- cuda = ct.CDLL("libcuda.so")
- except OSError:
- CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
- return None
- check_cuda_result(cuda, cuda.cuInit(0))
-
- return cuda
-
|
bitsandbytes.functional/get_4bit_type
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<3>:<add> ''' Implements the NF4 data type.
<add>
<add> Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that
<add> is normalized into the range [-1, 1].
<add>
<add> For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314)
<add>
<add> Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in
<add> the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.
<add> '''
<16>:<add> # can also be created with bnb.functional.create_fp8_map(signed=True, exponent_bits=2, precision_bits=1, total_bits=4)
|
# module: bitsandbytes.functional
def get_4bit_type(typename, device=None, blocksize=64):
<0> if device is None: device = 'cuda'
<1> data = None
<2> if typename == 'nf4':
<3> data = [-1.0, -0.6961928009986877, -0.5250730514526367, -0.39491748809814453, -0.28444138169288635,
<4> -0.18477343022823334, -0.09105003625154495, 0.0, 0.07958029955625534, 0.16093020141124725,
<5> 0.24611230194568634, 0.33791524171829224, 0.44070982933044434, 0.5626170039176941,
<6> 0.7229568362236023, 1.0]
<7> elif typename == 'fp4':
<8> # 0b000 = 0
<9> # 0b001 = 0.0625
<10> # 0b010 = 8
<11> # 0b011 = 12
<12> # 0b100 = 4
<13> # 0b101 = 6
<14> # 0b110 = 2
<15> # 0b111 = 3
<16> data = [0, 0.0625, 8.0, 12.0, 4.0, 6.0, 2.0, 3.0, -0, -0.0625, -8.0, -12.0, -4.0, -6.0, -2.0, -3.0]
<17> elif typename == 'int4':
<18> data = [7, 6, 5, 4, 3, 2, 1, 0, -0, -1, -2, -3, -4, -5, -6, -7]
<19> elif typename == 'af4':
<20> # Taken from: NF4 Isn't Information Theoretically Optimal (and that's Good)
<21> # https://arxiv.</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def get_4bit_type(typename, device=None, blocksize=64):
# offset: 1
if blocksize == 64:
data = [-1., -0.69441008, -0.51243739, -0.3736951, -0.25607552, -0.14982478,
-0.04934812, 0., 0.04273164, 0.12934483, 0.21961274, 0.31675666,
0.42563882, 0.55496234, 0.72424863, 1.][::-1]
else:
raise NotImplementedError(f'4-bit AbnormalFloats currently only support blocksize 64.')
if data is None:
raise NotImplementedError(f'Typename {typename} not supported')
data = Tensor(data)
data /= data.abs().max()
assert data.numel() == 16
return data.to(device)
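As a sanity check of the "equal area under N(0, 1)" description in the NF4 docstring above, here is a rough way to derive such a code with scipy's inverse normal CDF (a sketch only; the actual create_normal_map construction in bitsandbytes treats the positive and negative halves asymmetrically so that an exact 0 is representable, which is why the hard-coded nf4 values above are not perfectly symmetric):

import torch
from scipy.stats import norm

bins = 16                                           # a 4-bit code has 16 values
edges = torch.linspace(0, 1, bins + 1)              # equal probability mass per bin
centers = (edges[:-1] + edges[1:]) / 2              # bin centers in probability space
values = torch.tensor(norm.ppf(centers.numpy()))    # map to quantiles of N(0, 1)
values = values / values.abs().max()                # normalize into [-1, 1]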
===========changed ref 0===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
"""
Dequantizes blockwise quantized values.
Dequantizes the tensor A with maximum absolute values absmax in
blocks of size 4096.
Parameters
----------
A : torch.Tensor
The input 8-bit tensor.
quant_state : tuple(torch.Tensor, torch.Tensor)
Tuple of code and absmax values.
absmax : torch.Tensor
The absmax values.
code : torch.Tensor
The quantization map.
out : torch.Tensor
Dequantized output tensor (default: float32)
Returns
-------
torch.Tensor:
Dequantized tensor (default: float32)
"""
assert quant_state is not None or absmax is not None
if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if quant_state is None:
quant_state = (absmax, code, blocksize, False, torch.float32, None, None)
absmax, code, blocksize, nested, dtype, offset, state2 = quant_state
+
if nested:
absmax = dequantize_blockwise(absmax, state2)
absmax += offset
+ if absmax.dtype != torch.float32: absmax = absmax.float()
if out is None:
out = torch.empty(A.shape, dtype=dtype, device=A.device)
if A.device.type != 'cpu':
device = pre_call(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024</s>
===========changed ref 1===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 1
<s>(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.bfloat16:
lib.cdequantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_</s>
===========changed ref 2===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 2
<s>1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
===========changed ref 3===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
"""
Quantize tensor A in blocks of size 4096 values.
Quantizes tensor A by dividing it into blocks of 4096 values.
Then the absolute maximum value within these blocks is calculated
for the non-linear quantization.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
absmax : torch.Tensor
The absmax values.
out : torch.Tensor
The output tensor (8-bit).
Returns
-------
torch.Tensor:
The 8-bit tensor.
tuple(torch.Tensor, torch.Tensor):
The quantization state to undo the quantization.
"""
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if absmax is None:
n = A.numel()
blocks = n // blocksize
blocks += 1 if n % blocksize > 0 else 0
+ absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
- absmax = torch.zeros((blocks,), device=A.device)
if out is None:
out = torch.zeros_like(A, dtype=torch.uint8)
if A.device.type != 'cpu':
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
cblocksize = ct.c_int32(blocksize)
prev_device = pre_call(A.device)
code = code.to(A.device)
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32</s>
|
bitsandbytes.functional/quantize_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<36>:<add> absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
<del> absmax = torch.zeros((blocks,), device=A.device)
|
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
<0> """
<1> Quantize tensor A in blocks of 4-bit values.
<2>
<3> Quantizes tensor A by dividing it into blocks which are independently quantized to FP4.
<4>
<5> Parameters
<6> ----------
<7> A : torch.Tensor
<8> The input tensor.
<9> absmax : torch.Tensor
<10> The absmax values.
<11> out : torch.Tensor
<12> The output tensor (8-bit).
<13> blocksize : int
<14> The blocksize used in quantization.
<15> quant_type : str
<16> The 4-bit quantization data type {fp4, nf4}
<17>
<18> Returns
<19> -------
<20> torch.Tensor:
<21> The 8-bit tensor with packed 4-bit values.
<22> tuple(torch.Tensor, torch.Size, torch.dtype, int):
<23> The quantization state to undo the quantization.
<24> """
<25> if A.device.type != 'cuda':
<26> raise NotImplementedError(f'Device type not supported for FP4 quantization: {A.device.type}')
<27> if quant_type not in ['fp4', 'nf4']:
<28> raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
<29>
<30> n = A.numel()
<31> input_shape = A.shape
<32>
<33> if absmax is None:
<34> blocks = n // blocksize
<35> blocks += 1 if n % blocksize > 0 else 0
<36> absmax = torch.zeros((blocks,), device=A.device)
<37>
<38>
<39> if out is None:
<40> out = torch.zeros(((n+1)//2, 1), dtype=torch.uint8, device=A.device)
<41>
<42> assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
<43>
<44> prev_device = pre_</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
# offset: 1
is_on_gpu([A, out, absmax])
if A.dtype == torch.float32:
if quant_type == 'fp4':
lib.cquantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
elif A.dtype == torch.float16:
if quant_type == 'fp4':
lib.cquantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
elif A.dtype == torch.bfloat16:
if quant_type == 'fp4':
lib.cquantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
# offset: 2
<s>None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
datatype = get_4bit_type(quant_type, device=A.device)
if compress_statistics:
offset = absmax.mean()
absmax -= offset
qabsmax, state2 = quantize_blockwise(absmax, blocksize=256)
del absmax
state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type, datatype]
else:
state = [absmax, input_shape, A.dtype, blocksize, None, quant_type, datatype]
return out, state
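Note that out above stores two 4-bit codes per byte, which is why it is allocated with (n+1)//2 uint8 elements. A small sketch of that nibble packing (the nibble order here is illustrative; the exact order is an implementation detail of the CUDA kernels):

import torch

idx = torch.tensor([3, 12, 7, 0], dtype=torch.uint8)   # 4-bit code indices in [0, 15]
packed = (idx[0::2] << 4) | idx[1::2]                   # 4 values -> 2 bytes
hi, lo = packed >> 4, packed & 0xF                      # unpack both nibbles again
assert torch.equal(torch.stack([hi, lo], dim=1).flatten(), idx)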
===========changed ref 0===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
"""
Dequantizes blockwise quantized values.
Dequantizes the tensor A with maximum absolute values absmax in
blocks of size 4096.
Parameters
----------
A : torch.Tensor
The input 8-bit tensor.
quant_state : tuple(torch.Tensor, torch.Tensor)
Tuple of code and absmax values.
absmax : torch.Tensor
The absmax values.
code : torch.Tensor
The quantization map.
out : torch.Tensor
Dequantized output tensor (default: float32)
Returns
-------
torch.Tensor:
Dequantized tensor (default: float32)
"""
assert quant_state is not None or absmax is not None
if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if quant_state is None:
quant_state = (absmax, code, blocksize, False, torch.float32, None, None)
absmax, code, blocksize, nested, dtype, offset, state2 = quant_state
+
if nested:
absmax = dequantize_blockwise(absmax, state2)
absmax += offset
+ if absmax.dtype != torch.float32: absmax = absmax.float()
if out is None:
out = torch.empty(A.shape, dtype=dtype, device=A.device)
if A.device.type != 'cpu':
device = pre_call(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024</s>
===========changed ref 1===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 1
<s>(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.bfloat16:
lib.cdequantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_</s>
===========changed ref 2===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 2
<s>1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
|
bitsandbytes.functional/dequantize_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
<0> """
<1> Dequantizes FP4 blockwise quantized values.
<2>
<3> Dequantizes the tensor A with maximum absolute values absmax in blocks of size blocksize.
<4>
<5> Parameters
<6> ----------
<7> A : torch.Tensor
<8> The input 8-bit tensor (packed 4-bit values).
<9> quant_state : tuple(torch.Tensor, torch.Size, torch.dtype)
<10> Tuple of absmax values, original tensor shape and original dtype.
<11> absmax : torch.Tensor
<12> The absmax values.
<13> out : torch.Tensor
<14> Dequantized output tensor.
<15> blocksize : int
<16> The blocksize used in quantization.
<17> quant_type : str
<18> The 4-bit quantization data type {fp4, nf4}
<19>
<20>
<21> Returns
<22> -------
<23> torch.Tensor:
<24> Dequantized tensor.
<25> """
<26> if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
<27> raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
<28> if quant_type not in ['fp4', 'nf4']:
<29> raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
<30>
<31> if quant_state is None:
<32> assert absmax is not None and out is not None
<33> shape = out.shape
<34> dtype = out.dtype
<35> else:
<36> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
<37>
<38>
<39> if compressed_stats is not None:
<40> offset, state2 = compressed_stats
<41> absmax = dequantize_blockwise(absmax, state2)</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
# offset: 1
if out is None:
out = torch.empty(shape, dtype=dtype, device=A.device)
n = out.numel()
device = pre_call(A.device)
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
if quant_type == 'fp4':
lib.cdequantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
elif out.dtype == torch.float16:
if quant_type == 'fp4':
lib.cdequantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
elif out.dtype == torch.bfloat16:
if quant_type == 'fp4':
lib.cdequantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
# offset: 2
<s>_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
is_transposed = (True if A.shape[0] == 1 else False)
if is_transposed: return out.t()
else: return out
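A minimal end-to-end use of the two 4-bit functions (a sketch; a CUDA device is required, as the checks above enforce):

import torch
import bitsandbytes.functional as F

W = torch.randn(1024, 1024, dtype=torch.float16, device='cuda')
qW, state = F.quantize_4bit(W, blocksize=64, quant_type='nf4', compress_statistics=True)
W_hat = F.dequantize_4bit(qW, state)          # fp16 again, original shape restored from the state
print((W - W_hat).abs().mean())               # NF4 blockwise quantization error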
===========changed ref 0===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
"""
Dequantizes blockwise quantized values.
Dequantizes the tensor A with maximum absolute values absmax in
blocks of size 4096.
Parameters
----------
A : torch.Tensor
The input 8-bit tensor.
quant_state : tuple(torch.Tensor, torch.Tensor)
Tuple of code and absmax values.
absmax : torch.Tensor
The absmax values.
code : torch.Tensor
The quantization map.
out : torch.Tensor
Dequantized output tensor (default: float32)
Returns
-------
torch.Tensor:
Dequantized tensor (default: float32)
"""
assert quant_state is not None or absmax is not None
if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if quant_state is None:
quant_state = (absmax, code, blocksize, False, torch.float32, None, None)
absmax, code, blocksize, nested, dtype, offset, state2 = quant_state
+
if nested:
absmax = dequantize_blockwise(absmax, state2)
absmax += offset
+ if absmax.dtype != torch.float32: absmax = absmax.float()
if out is None:
out = torch.empty(A.shape, dtype=dtype, device=A.device)
if A.device.type != 'cpu':
device = pre_call(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024</s>
===========changed ref 1===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 1
<s>(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.bfloat16:
lib.cdequantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_</s>
===========changed ref 2===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 2
<s>1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
|
|
bitsandbytes.functional/quantize
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6102029ab98b7dd9cc6651966b1e882f59961afe
|
Merge pull request #587 from BramVanroy/patch-1
|
<7>:<add> if absmax.dtype != torch.float32: absmax = absmax.float()
|
# module: bitsandbytes.functional
def quantize(A: Tensor, code: Tensor = None, out: Tensor = None) -> Tensor:
<0> if code is None:
<1> if "dynamic" not in name2qmap:
<2> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<3> code = name2qmap["dynamic"]
<4> code = code.to(A.device)
<5>
<6> absmax = torch.abs(A).max()
<7> inp = A / absmax
<8> out = quantize_no_absmax(inp, code, out)
<9> return out, (absmax, code)
<10>
|
===========changed ref 0===========
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
"""
Dequantizes FP4 blockwise quantized values.
Dequantizes the tensor A with maximum absolute values absmax in blocks of size blocksize.
Parameters
----------
A : torch.Tensor
The input 8-bit tensor (packed 4-bit values).
quant_state : tuple(torch.Tensor, torch.Size, torch.dtype)
Tuple of absmax values, original tensor shape and original dtype.
absmax : torch.Tensor
The absmax values.
out : torch.Tensor
Dequantized output tensor.
blocksize : int
The blocksize used in quantization.
quant_type : str
The 4-bit quantization data type {fp4, nf4}
Returns
-------
torch.Tensor:
Dequantized tensor.
"""
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
if quant_type not in ['fp4', 'nf4']:
raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
if quant_state is None:
assert absmax is not None and out is not None
shape = out.shape
dtype = out.dtype
else:
absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
if compressed_stats is not None:
offset, state2 = compressed_stats
absmax = dequantize_blockwise(absmax, state2)
absmax += offset
+ if absmax.dtype != torch.float32: absmax = absmax.float()
if out is None:
out = torch.</s>
===========changed ref 1===========
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
# offset: 1
<s>.dtype != torch.float32: absmax = absmax.float()
if out is None:
out = torch.empty(shape, dtype=dtype, device=A.device)
n = out.numel()
device = pre_call(A.device)
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
if quant_type == 'fp4':
lib.cdequantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
elif out.dtype == torch.float16:
if quant_type == 'fp4':
lib.cdequantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
elif out.dtype == torch.bfloat16:
if quant_type == 'fp4':
lib.c</s>
===========changed ref 2===========
# module: bitsandbytes.functional
def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
# offset: 2
<s>ize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
is_transposed = (True if A.shape[0] == 1 else False)
if is_transposed: return out.t()
else: return out
===========changed ref 3===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
"""
Dequantizes blockwise quantized values.
Dequantizes the tensor A with maximum absolute values absmax in
blocks of size 4096.
Parameters
----------
A : torch.Tensor
The input 8-bit tensor.
quant_state : tuple(torch.Tensor, torch.Tensor)
Tuple of code and absmax values.
absmax : torch.Tensor
The absmax values.
code : torch.Tensor
The quantization map.
out : torch.Tensor
Dequantized output tensor (default: float32)
Returns
-------
torch.Tensor:
Dequantized tensor (default: float32)
"""
assert quant_state is not None or absmax is not None
if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if quant_state is None:
quant_state = (absmax, code, blocksize, False, torch.float32, None, None)
absmax, code, blocksize, nested, dtype, offset, state2 = quant_state
+
if nested:
absmax = dequantize_blockwise(absmax, state2)
absmax += offset
+ if absmax.dtype != torch.float32: absmax = absmax.float()
if out is None:
out = torch.empty(A.shape, dtype=dtype, device=A.device)
if A.device.type != 'cpu':
device = pre_call(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024</s>
|
bitsandbytes.__main__/generate_bug_report_information
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
37c25c1e0db6b61d9a55ff5d6879eb50a10326e7
|
Merge branch 'main' of github.com:TimDettmers/bitsandbytes into main
|
<23>:<add> if 'LD_LIBRARY_PATH' in os.environ:
<add> lib_path = os.environ['LD_LIBRARY_PATH'].strip()
<del> lib_path = os.environ['LD_LIBRARY_PATH'].strip()
<24>:<add> for path in set(lib_path.split(':')):
<del> for path in set(lib_path.split(':')):
<25>:<add> try:
<del> try:
<26>:<add> if isdir(path):
<del> if isdir(path):
<27>:<add> print_header(f"{path} CUDA PATHS")
<del> print_header(f"{path} CUDA PATHS")
<28>:<add> paths = find_file_recursive(path, '*cuda*so')
<del> paths = find_file_recursive(path, '*cuda*so')
<29>:<add> print(paths)
<del> print(paths)
<30>:<add> except:
<del> except:
<31>:<add> print(f'Could not read LD_LIBRARY_PATH: {path}')
<del> print(f'Could not read LD_LIBRARY_PATH: {path}')
|
# module: bitsandbytes.__main__
def generate_bug_report_information():
<0> print_header("")
<1> print_header("BUG REPORT INFORMATION")
<2> print_header("")
<3> print('')
<4>
<5> if 'CONDA_PREFIX' in os.environ:
<6> paths = find_file_recursive(os.environ['CONDA_PREFIX'], '*cuda*so')
<7> print_header("ANACONDA CUDA PATHS")
<8> print(paths)
<9> print('')
<10> if isdir('/usr/local/'):
<11> paths = find_file_recursive('/usr/local', '*cuda*so')
<12> print_header("/usr/local CUDA PATHS")
<13> print(paths)
<14> print('')
<15>
<16> if isdir(os.getcwd()):
<17> paths = find_file_recursive(os.getcwd(), '*cuda*so')
<18> print_header("WORKING DIRECTORY CUDA PATHS")
<19> print(paths)
<20> print('')
<21>
<22> print_header("LD_LIBRARY CUDA PATHS")
<23> lib_path = os.environ['LD_LIBRARY_PATH'].strip()
<24> for path in set(lib_path.split(':')):
<25> try:
<26> if isdir(path):
<27> print_header(f"{path} CUDA PATHS")
<28> paths = find_file_recursive(path, '*cuda*so')
<29> print(paths)
<30> except:
<31> print(f'Could not read LD_LIBRARY_PATH: {path}')
<32> print('')
<33>
|
===========unchanged ref 0===========
at: bitsandbytes.__main__
find_file_recursive(folder, filename)
print_header(txt: str, width: int=HEADER_WIDTH, filler: str="+") -> None
at: os
getcwd() -> str
environ = _createenviron()
at: os.path
isdir(s: AnyPath) -> bool
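The diff above guards the environment lookup because os.environ['LD_LIBRARY_PATH'] raises KeyError when the variable is unset (common in minimal containers). The pattern in isolation:

import os

if 'LD_LIBRARY_PATH' in os.environ:            # avoid KeyError when the variable is unset
    for path in set(os.environ['LD_LIBRARY_PATH'].strip().split(':')):
        print(path)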
|
bitsandbytes.cuda_setup.main/remove_non_existent_dirs
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
f3232d139164be384405d0cb788113ebb7d57cc2
|
Fixed bug where read-permission was assumed for a file. #497
|
<8>:<add> except PermissionError as pex:
<add> pass
|
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
<0> existent_directories: Set[Path] = set()
<1> for path in candidate_paths:
<2> try:
<3> if path.exists():
<4> existent_directories.add(path)
<5> except OSError as exc:
<6> if exc.errno != errno.ENAMETOOLONG:
<7> raise exc
<8>
<9> non_existent_directories: Set[Path] = candidate_paths - existent_directories
<10> if non_existent_directories:
<11> CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
<12> f"be non-existent: {non_existent_directories}", is_warning=False)
<13>
<14> return existent_directories
<15>
|
===========unchanged ref 0===========
at: bitsandbytes.cuda_setup.main
CUDASetup()
at: bitsandbytes.cuda_setup.main.CUDASetup
_instance = None
get_instance()
at: errno
ENAMETOOLONG: int
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
exists() -> bool
at: typing
Set = _alias(set, 1, inst=False, name='Set')
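For context on the fix: pathlib.Path.exists() can propagate PermissionError (for example when a parent directory is not traversable), which is why the loop above now swallows it instead of assuming read permission. The guarded check as a standalone sketch:

from pathlib import Path

def safe_exists(path: Path) -> bool:
    try:
        return path.exists()
    except PermissionError:
        return False          # treat unreadable candidates as non-existent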
|
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
c82f51c0f784d8a43ebcb9cdefbf94e3f3b9c6c3
|
Increased occupancy.
|
<2>:<add> #for dim in [1*16]:
<del> #for dim in [1*128]:
|
<s>4', 'fp4'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
<0> for dim in [128, 256, 512, 1024]:
<1> #for dim in [4*1024]:
<2> #for dim in [1*128]:
<3> errs1 = []
<4> errs2 = []
<5> errs3 = []
<6> relerrs1 = []
<7> relerrs2 = []
<8> relerrs3 = []
<9> max_errs1 = []
<10> max_errs2 = []
<11> max_errs3 = []
<12>
<13>
<14> for i in range(100):
<15> if kind == 'fc1':
<16> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<17> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<18> elif kind == 'fc2':
<19> A = torch.randn(1, 4*dim, dtype=dtype, device='cuda')
<20> B = torch.randn(dim, 4*dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<21> elif kind == 'attn':
<22> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<23> B = torch.randn(dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<24> elif kind == 'attn_packed':
<25> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<26> B = torch.randn(dim*3, dim, dtype=dtype, device</s>
|
===========below chunk 0===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 1
qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=double_quant)
C3 = torch.matmul(A, B.t())
C2 = F.gemv_4bit(A, qB.t(), state=state)
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
err1 = (C1-C2).abs().float()
err2 = (C3-C2).abs().float()
err3 = (C3-C1).abs().float()
mag1 = torch.abs(C1).float()+1e-5
mag2 = torch.abs(C3).float()+1e-5
mag3 = torch.abs(C3).float()+1e-5
relerr1 = err1/mag1
relerr2 = err2/mag2
relerr3 = err3/mag3
max_err1 = err1.max()
max_err2 = err2.max()
max_err3 = err3.max()
errs1.append(err1.mean().item())
errs2.append(err2.mean().item())
errs3.append(err3.mean().item())
relerrs1.append(relerr1.mean().item())
relerrs2.append(relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_</s>
===========below chunk 1===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 2
<s>relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_errs1.append(max_err1.item())
max_errs2.append(max_err2.item())
max_errs3.append(max_err3.item())
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
err1 = sum(errs1)/len(errs1)/math.sqrt(dim)
err2 = sum(errs2)/len(errs2)/math.sqrt(dim)
err3 = sum(errs3)/len(errs3)/math.sqrt(dim)
relerr1 = sum(relerrs1)/len(relerrs1)/math.sqrt(dim)
relerr2 = sum(relerrs2)/len(relerrs2)/math.sqrt(dim)
relerr3 = sum(relerrs3)/len(relerrs3)/math.sqrt(dim)
maxerr1 = sum(max_errs1)/len(max_errs1)/math.sqrt(dim)
maxerr2 = sum(max_errs2)/len(max_errs2)/math.sqrt(dim)
maxerr3 = sum(max</s>
===========below chunk 2===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 3
<s>s3)/len(max_errs3)/math.sqrt(dim)
absratio = err2/err3
relratio = relerr2/relerr3
maxratio = relerr2/relerr3
# for debugging if the tests fails
#
#print('='*80)
#print(f'For matmul: {A.shape}, {B.shape}, {kind}, {dtype}, {storage_type}, double_quant={double_quant}:')
#print(C1.flatten()[-20:])
#print(C2.flatten()[-20:])
#print(f'inference vs training abs: {err1}')
#print(f'inference vs training rel: {relerr1}')
#print(f'inference vs training max: {maxerr1}')
#print(f'inference vs training vs torch err ratio abs: {absratio}')
#print(f'inference vs training vs torch err ratio rel: {relratio}')
#print(f'inference vs training vs torch err ratio max: {maxratio}')
if dtype == torch.float16:
if dim <= 512:
assert err1 < 7e-5
assert relerr1 < 0.0008
else:
assert err1 < 6e-5
assert relerr1 < 2e-4
assert absratio < 1.005 and absratio > 0.995
assert relratio <</s>
===========below chunk 3===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 4
<s>005 and relratio > 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.float32:
if dim <= 512:
assert err1 < 5e-8
assert relerr1 < 1e-6
assert maxerr1 < 1e-7
else:
assert err1 < 5e-8
assert relerr1 < 8e-6
assert maxerr1 < 1e-7
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.005 and relratio > 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.bfloat16:
if dim <= 512:
assert err1 < 5e-4
assert relerr1 < 0.007
assert maxerr1 < 0.015
else:
assert err1 < 2e-4
assert relerr1 < 0.002
assert maxerr1 < 0.0012
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.04 and relratio > 0.96
assert maxratio < 1.02 and maxratio > 0.98
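Outside the test harness, the comparison above boils down to three ways of computing the same product (a sketch; CUDA and a 4-bit-capable bitsandbytes build are assumed):

import math
import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

dim = 256
A = torch.randn(1, dim, dtype=torch.float16, device='cuda')                       # single-token input
B = torch.randn(4 * dim, dim, dtype=torch.float16, device='cuda') / math.sqrt(dim)
qB, state = F.quantize_4bit(B, quant_type='nf4')

C_ref = torch.matmul(A, B.t())                   # fp16 reference
C_gemv = F.gemv_4bit(A, qB.t(), state=state)     # fused inference kernel (batch size 1)
C_mm = bnb.matmul_4bit(A, qB.t(), state)         # autograd-capable path used during training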
|
bitsandbytes.nn.modules/Linear4bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
412fd0e71702c064505ceb924d0b6a8f8ac4f476
|
Added better default compute_dtype handling for Linear4bit layers.
|
<3>:<add> self.compute_type_is_set = False
|
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
<0> super().__init__(input_features, output_features, bias, device)
<1> self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
<2> self.compute_dtype = compute_dtype
<3>
| |
bitsandbytes.nn.modules/Linear4bit.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
412fd0e71702c064505ceb924d0b6a8f8ac4f476
|
Added better default compute_dtype handling for Linear4bit layers.
|
<6>:<add> if not self.compute_type_is_set:
<add> self.set_compute_type(x)
<add> self.compute_type_is_set = True
<add>
|
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def forward(self, x: torch.Tensor):
<0> # weights are cast automatically as Int8Params, but the bias has to be cast manually
<1> if self.bias is not None and self.bias.dtype != x.dtype:
<2> self.bias.data = self.bias.data.to(x.dtype)
<3>
<4> if getattr(self.weight, 'quant_state', None) is None:
<5> print('FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.')
<6> inp_dtype = x.dtype
<7> if self.compute_dtype is not None:
<8> x = x.to(self.compute_dtype)
<9>
<10> bias = None if self.bias is None else self.bias.to(self.compute_dtype)
<11> out = bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state)
<12>
<13> out = out.to(inp_dtype)
<14>
<15> return out
<16>
|
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
super().__init__(input_features, output_features, bias, device)
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
self.compute_dtype = compute_dtype
+ self.compute_type_is_set = False
|
tests.test_modules/test_kbit_backprop
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
412fd0e71702c064505ceb924d0b6a8f8ac4f476
|
Added better default compute_dtype handling for Linear4bit layers.
|
# module: tests.test_modules
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize("module", modules, ids=names)
def test_kbit_backprop(module):
<0> b = 17
<1> dim1 = 37
<2> dim2 = 83
<3>
<4> ref = nn.Sequential(*[torch.nn.Linear(dim1, dim2), torch.nn.Linear(dim2, 10)])
<5> ref[1].weight.requires_grad = False
<6> torch.nn.init.kaiming_normal_(ref[0].weight)
<7> torch.nn.init.kaiming_normal_(ref[1].weight)
<8> kbit = nn.Sequential(*[torch.nn.Linear(dim1, dim2), module(dim2, 10)])
<9> kbit[0].weight.detach().copy_(ref[0].weight)
<10> kbit[1].weight.detach().copy_(ref[1].weight)
<11> kbit[0].bias.detach().copy_(ref[0].bias)
<12> kbit[1].bias.detach().copy_(ref[1].bias)
<13> ref = ref.half().cuda()
<14> kbit = kbit.half().cuda()
<15> kbit = kbit.half().to('cuda')
<16>
<17> errs1 = []
<18> errs2 = []
<19> relerrs1 = []
<20> relerrs2 = []
<21> for i in range(100):
<22> batch = torch.randn(b, dim1).half().cuda()
<23> out1 = ref(batch)
<24> out2 = kbit(batch)
<25> out1.mean().backward()
<26> out2.mean().backward()
<27>
<28> grad1 = ref[0].weight.grad
<29> grad2 = kbit[0].weight.grad
<30> bgrad1 = ref[0].bias.grad
<31> bgrad2 = kbit[0].bias.grad
<32>
<33> </s>
|
===========below chunk 0===========
# module: tests.test_modules
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize("module", modules, ids=names)
def test_kbit_backprop(module):
# offset: 1
err2 = (grad1-grad2).abs().float()
relerr1 = (err1/(out1.abs().float()+1e-9))
relerr2 = (err2/(grad1.abs().float()+1e-9))
errs1.append(err1.mean().item())
errs2.append(err2.mean().item())
relerrs1.append(relerr1.mean().item())
relerrs2.append(relerr2.mean().item())
if isinstance(module, bnb.nn.Linear8bitLt):
torch.testing.assert_close(grad1, grad2, atol=0.008, rtol=0.05)
torch.testing.assert_close(bgrad1, bgrad2, atol=0.008, rtol=0.05)
else:
torch.testing.assert_close(grad1, grad2, atol=0.015, rtol=0.05)
torch.testing.assert_close(bgrad1, bgrad2, atol=0.02, rtol=0.05)
ref.zero_grad()
kbit.zero_grad()
assert kbit[0].weight.grad is None or kbit[0].weight.grad.sum().item() == 0
assert kbit[0].weight.grad is None or kbit[0].bias.grad.sum().item() == 0
print('out', sum(errs1)/len(errs1))
print('grad', sum(errs2)/len(errs2))
print('rel out', sum(relerrs1)/len(relerrs1))
print('rel grad', sum(relerrs2)/len(relerrs2))
===========changed ref 0===========
# module: tests.test_modules
modules = []
modules.append(bnb.nn.Linear8bitLt)
modules.append(bnb.nn.Linear4bit)
modules.append(bnb.nn.LinearFP4)
modules.append(bnb.nn.LinearNF4)
modules.append(lambda d1, d2: bnb.nn.LinearFP4(d1, d2, compress_statistics=True))
modules.append(lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compress_statistics=True))
+ modules.append(lambda d1, d2: bnb.nn.LinearFP4(d1, d2, compute_dtype=torch.float32))
+ modules.append(lambda d1, d2: bnb.nn.LinearFP4(d1, d2, compute_dtype=torch.float16))
+ modules.append(lambda d1, d2: bnb.nn.LinearFP4(d1, d2, compute_dtype=torch.bfloat16))
+ names = ['Int8Lt', '4bit', 'FP4', 'NF4', 'FP4+C', 'NF4+C', 'NF4+fp32', 'NF4+fp16', 'NF4+bf16']
- names = ['Int8Lt', '4bit', 'FP4', 'NF4', 'FP4+C', 'NF4+C']
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
super().__init__(input_features, output_features, bias, device)
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
self.compute_dtype = compute_dtype
+ self.compute_type_is_set = False
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def forward(self, x: torch.Tensor):
# weights are cast automatically as Int8Params, but the bias has to be cast manually
if self.bias is not None and self.bias.dtype != x.dtype:
self.bias.data = self.bias.data.to(x.dtype)
if getattr(self.weight, 'quant_state', None) is None:
print('FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.')
+ if not self.compute_type_is_set:
+ self.set_compute_type(x)
+ self.compute_type_is_set = True
+
inp_dtype = x.dtype
if self.compute_dtype is not None:
x = x.to(self.compute_dtype)
bias = None if self.bias is None else self.bias.to(self.compute_dtype)
out = bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state)
out = out.to(inp_dtype)
return out
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def set_compute_type(self, x):
+ if x.dtype in [torch.float32, torch.bfloat16]:
+ # the input is in a dtype that is safe to compute in, we switch
+ # to this type for speed and stability
+ self.compute_dtype = x.dtype
+ elif x.dtype == torch.float16:
+ # we take the compute dtype passed into the layer
+ if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
+ # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
+ # warn the user about this
+ warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference.')
+ warnings.filterwarnings('ignore', message='.*inference.')
+ if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
+ warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference or training speed.')
+ warnings.filterwarnings('ignore', message='.*inference or training')
+
|
|
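The refs above add an optional `compute_dtype` to `Linear4bit` and a lazy `set_compute_type()` that resolves it on the first forward pass. A minimal usage sketch, assuming a CUDA device and arbitrary layer sizes (the sizes and tensor are illustrative, not taken from the record):

import torch
import bitsandbytes as bnb

# Hypothetical sizes; the constructor signature follows the changed refs above.
layer = bnb.nn.LinearFP4(1024, 1024, compute_dtype=torch.bfloat16).cuda()
x = torch.randn(2, 1024, dtype=torch.bfloat16, device="cuda")
y = layer(x)    # set_compute_type() runs once on this first call
print(y.dtype)  # output is cast back to the input dtype (torch.bfloat16)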
bitsandbytes.functional/create_dynamic_map
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3c9aca9124ab8bcd160a8c90bba0d6ca361c141f
|
Fixed two bugs in dynamic data type creation.
|
<20>:<add> non_sign_bits = total_bits - (1 if signed else 1)
<del> non_sign_bits = total_bits - (1 if signed else 0)
<22>:<del> if not signed:
<23>:<del> additional_items = 2 * additional_items
<32>:<add> if additional_items > 0:
<del> if additional_items > 0:
<33>:<add> boundaries = torch.linspace(0.1, 1, additional_items + 1)
<del> boundaries = torch.linspace(0.1, 1, additional_items + 1)
<34>:<add> means = (boundaries[:-1] + boundaries[1:]) / 2.0
<del> means = (
|
# module: bitsandbytes.functional
def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):
<0> """
<1> Creates the dynamic quantiztion map.
<2>
<3> The dynamic data type is made up of a dynamic exponent and
<4> fraction. As the exponent increase from 0 to -7 the number
<5> of bits available for the fraction shrinks.
<6>
<7> This is a generalization of the dynamic type where a certain
<8> number of the bits and be reserved for the linear quantization
<9> region (the fraction). n determines the maximum number of
<10> exponent bits.
<11>
<12> For more details see
<13> (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561]
<14> """
<15>
<16> data = []
<17> # these are additional items that come from the case
<18> # where all the exponent bits are zero and no
<19> # indicator bit is present
<20> non_sign_bits = total_bits - (1 if signed else 0)
<21> additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1
<22> if not signed:
<23> additional_items = 2 * additional_items
<24> for i in range(max_exponent_bits):
<25> fraction_items = int((2 ** (i + non_sign_bits - max_exponent_bits) + 1 if signed else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1))
<26> boundaries = torch.linspace(0.1, 1, fraction_items)
<27> means = (boundaries[:-1] + boundaries[1:]) / 2.0
<28> data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
<29> if signed:
<30> data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
<31>
<32> if additional_items > 0:
<33> boundaries = torch.linspace(0.1, 1, additional_items + 1)
<34> means = (</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):
# offset: 1
data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
if signed:
data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
data.append(0)
data.append(1.0)
gap = 256 - len(data)
for i in range(gap):
data.append(0)
data.sort()
return Tensor(data)
===========unchanged ref 0===========
at: torch._C._VariableFunctions
linspace(start: Number, end: Number, steps: Optional[_int]=None, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
linspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
|
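Since the record above tightens `create_dynamic_map` (the sign-bit handling plus the new `assert len(data) == 2**total_bits`), a quick sanity sketch of the resulting code-book sizes, using only arguments shown in the function signature:

import bitsandbytes.functional as F

# Default signed 8-bit dynamic map: exactly 2**8 entries, no zero padding needed.
code8 = F.create_dynamic_map(signed=True, total_bits=8)
assert code8.numel() == 256 and code8.max().item() == 1.0

# A smaller unsigned map; unused slots of the 256-entry tensor are zero-filled.
code5 = F.create_dynamic_map(signed=False, max_exponent_bits=4, total_bits=5)
assert code5.numel() == 256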
tests.test_functional/test_dynamic_quantization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3c9aca9124ab8bcd160a8c90bba0d6ca361c141f
|
Fixed two bugs in dynamic data type creation.
|
<11>:<add> print(sum(diffs)/len(diffs))
<del> # print(sum(diffs)/len(diffs))
<12>:<add> print(sum(reldiffs)/len(reldiffs))
<del> # print(sum(reldiffs)/len(reldiffs))
|
# module: tests.test_functional
def test_dynamic_quantization():
<0> diffs = []
<1> reldiffs = []
<2> for i in range(100):
<3> A1 = torch.randn(1024, 1024, device="cuda")
<4> C, S = F.quantize(A1)
<5> A2 = F.dequantize(C, S)
<6> diff = torch.abs(A1 - A2)
<7> reldiff = diff / torch.abs(A1 + 1e-8)
<8> diffs.append(diff.mean().item())
<9> reldiffs.append(reldiff.mean().item())
<10> assert diff.mean().item() < 0.0135
<11> # print(sum(diffs)/len(diffs))
<12> # print(sum(reldiffs)/len(reldiffs))
<13>
<14> for i in range(100):
<15> A1 = torch.rand(1024, 1024, device="cuda")
<16> C, S = F.quantize(A1)
<17> A2 = F.dequantize(C, S)
<18> diff = torch.abs(A1 - A2).mean().item()
<19> torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)
<20> assert diff < 0.004
<21>
|
===========changed ref 0===========
# module: bitsandbytes.functional
def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):
"""
Creates the dynamic quantiztion map.
The dynamic data type is made up of a dynamic exponent and
fraction. As the exponent increase from 0 to -7 the number
of bits available for the fraction shrinks.
This is a generalization of the dynamic type where a certain
number of the bits and be reserved for the linear quantization
region (the fraction). n determines the maximum number of
exponent bits.
For more details see
(8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561]
"""
data = []
# these are additional items that come from the case
# where all the exponent bits are zero and no
# indicator bit is present
+ non_sign_bits = total_bits - (1 if signed else 1)
- non_sign_bits = total_bits - (1 if signed else 0)
additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1
- if not signed:
- additional_items = 2 * additional_items
for i in range(max_exponent_bits):
fraction_items = int((2 ** (i + non_sign_bits - max_exponent_bits) + 1 if signed else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1))
boundaries = torch.linspace(0.1, 1, fraction_items)
means = (boundaries[:-1] + boundaries[1:]) / 2.0
data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
if signed:
data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
+ if additional_items > 0:
- if additional_items > 0:
+ boundaries = torch.linspace(0.1, 1, additional_items + 1)
- boundaries =</s>
===========changed ref 1===========
# module: bitsandbytes.functional
def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):
# offset: 1
<s> > 0:
+ boundaries = torch.linspace(0.1, 1, additional_items + 1)
- boundaries = torch.linspace(0.1, 1, additional_items + 1)
+ means = (boundaries[:-1] + boundaries[1:]) / 2.0
- means = (boundaries[:-1] + boundaries[1:]) / 2.0
+ data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
+ if signed:
+ data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
- data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
- if signed:
- data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
data.append(0)
data.append(1.0)
+
+ assert len(data) == 2**total_bits
gap = 256 - len(data)
for i in range(gap):
data.append(0)
data.sort()
return Tensor(data)
|
tests.test_functional/test_dynamic_blockwise_quantization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3c9aca9124ab8bcd160a8c90bba0d6ca361c141f
|
Fixed two bugs in dynamic data type creation.
|
<20>:<add> code = F.create_dynamic_map(signed=signed)
<22>:<add> C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested, code=code)
<del> C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)
|
<s>test.mark.parametrize("nested", [False, True], ids=["False", "True"])
@pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
+ @pytest.mark.parametrize("signed", [True, False], ids=['signed_True', 'signed_False'])
+ def test_dynamic_blockwise_quantization(dtype, nested, blocksize, signed):
- def test_dynamic_blockwise_quantization(dtype, nested, blocksize):
<0> #print('')
<1> diffs = []
<2> reldiffs = []
<3> for i in range(100):
<4> A1 = torch.randn(1024, 1024, device="cuda", dtype=dtype)
<5> C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)
<6> A2 = F.dequantize_blockwise(C, S)
<7> diff = torch.abs(A1 - A2).float()
<8> reldiff = diff / torch.abs(A1.float() + 1e-8)
<9> diffs.append(diff.mean().item())
<10> reldiffs.append(reldiff.mean().item())
<11> abserr = sum(diffs)/len(diffs)
<12> relerr = sum(reldiffs)/len(reldiffs)
<13> #print('nested=', nested, 'randn', blocksize, 'dtype', dtype, sum(diffs)/len(diffs))
<14> #print('nested=', nested, 'randn', blocksize, 'dtype', dtype, sum(reldiffs)/len(reldiffs))
<15> assert abserr < 0.011
<16> assert relerr < 0.018
<17> assert A2.dtype == dtype
<18>
<19> diffs = []
<20> for i in range(100):
<21> A1 = torch.rand(1024, 1024, device="cuda", dtype=dtype)
<22> C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)
<23> A</s>
|
===========below chunk 0===========
<s>etrize("nested", [False, True], ids=["False", "True"])
@pytest.mark.parametrize("blocksize", [4096, 2048, 1024, 512, 256, 128, 64])
+ @pytest.mark.parametrize("signed", [True, False], ids=['signed_True', 'signed_False'])
+ def test_dynamic_blockwise_quantization(dtype, nested, blocksize, signed):
- def test_dynamic_blockwise_quantization(dtype, nested, blocksize):
# offset: 1
diff = torch.abs(A1 - A2).float()
reldiff = diff / torch.abs(A1.float() + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
#torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)
abserr = sum(diffs)/len(diffs)
relerr = sum(reldiffs)/len(reldiffs)
assert abserr < 0.0035
assert relerr < 0.015
assert A2.dtype == dtype
===========changed ref 0===========
# module: tests.test_functional
def test_dynamic_quantization():
diffs = []
reldiffs = []
for i in range(100):
A1 = torch.randn(1024, 1024, device="cuda")
C, S = F.quantize(A1)
A2 = F.dequantize(C, S)
diff = torch.abs(A1 - A2)
reldiff = diff / torch.abs(A1 + 1e-8)
diffs.append(diff.mean().item())
reldiffs.append(reldiff.mean().item())
assert diff.mean().item() < 0.0135
+ print(sum(diffs)/len(diffs))
- # print(sum(diffs)/len(diffs))
+ print(sum(reldiffs)/len(reldiffs))
- # print(sum(reldiffs)/len(reldiffs))
for i in range(100):
A1 = torch.rand(1024, 1024, device="cuda")
C, S = F.quantize(A1)
A2 = F.dequantize(C, S)
diff = torch.abs(A1 - A2).mean().item()
torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)
assert diff < 0.004
===========changed ref 1===========
# module: bitsandbytes.functional
def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):
"""
Creates the dynamic quantiztion map.
The dynamic data type is made up of a dynamic exponent and
fraction. As the exponent increase from 0 to -7 the number
of bits available for the fraction shrinks.
This is a generalization of the dynamic type where a certain
number of the bits and be reserved for the linear quantization
region (the fraction). n determines the maximum number of
exponent bits.
For more details see
(8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561]
"""
data = []
# these are additional items that come from the case
# where all the exponent bits are zero and no
# indicator bit is present
+ non_sign_bits = total_bits - (1 if signed else 1)
- non_sign_bits = total_bits - (1 if signed else 0)
additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1
- if not signed:
- additional_items = 2 * additional_items
for i in range(max_exponent_bits):
fraction_items = int((2 ** (i + non_sign_bits - max_exponent_bits) + 1 if signed else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1))
boundaries = torch.linspace(0.1, 1, fraction_items)
means = (boundaries[:-1] + boundaries[1:]) / 2.0
data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
if signed:
data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
+ if additional_items > 0:
- if additional_items > 0:
+ boundaries = torch.linspace(0.1, 1, additional_items + 1)
- boundaries =</s>
===========changed ref 2===========
# module: bitsandbytes.functional
def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):
# offset: 1
<s> > 0:
+ boundaries = torch.linspace(0.1, 1, additional_items + 1)
- boundaries = torch.linspace(0.1, 1, additional_items + 1)
+ means = (boundaries[:-1] + boundaries[1:]) / 2.0
- means = (boundaries[:-1] + boundaries[1:]) / 2.0
+ data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
+ if signed:
+ data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
- data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
- if signed:
- data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
data.append(0)
data.append(1.0)
+
+ assert len(data) == 2**total_bits
gap = 256 - len(data)
for i in range(gap):
data.append(0)
data.sort()
return Tensor(data)
|
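The updated test above passes a custom code book into `quantize_blockwise` via the `code=` argument. A hedged round-trip sketch in the same spirit (tensor size, blocksize, and dtype are illustrative, not taken from the test):

import torch
import bitsandbytes.functional as F

A = torch.rand(1024, 1024, device="cuda", dtype=torch.float16)   # non-negative data
code = F.create_dynamic_map(signed=False)                        # unsigned code book
C, state = F.quantize_blockwise(A, blocksize=256, nested=True, code=code)
A2 = F.dequantize_blockwise(C, state)
print((A - A2).abs().float().mean().item())   # small mean absolute error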
bitsandbytes.autograd._functions/MatMul4Bit.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
<7>:<add> B_shape = quant_state.shape
<del> B_shape = state[1]
<16>:<add> output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)
<del> output = torch.nn.functional.linear(A, F.dequantize_4bit(B, state).to(A.dtype).t(), bias)
<19>:<add> ctx.state = quant_state
<del> ctx.state = state
|
<s>._functions
class MatMul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
+ def forward(ctx, A, B, out=None, bias=None, quant_state: F.QuantState = None):
- def forward(ctx, A, B, out=None, bias=None, state=None):
<0> # default of pytorch behavior if inputs are empty
<1> ctx.is_empty = False
<2> if prod(A.shape) == 0:
<3> ctx.is_empty = True
<4> ctx.A = A
<5> ctx.B = B
<6> ctx.bias = bias
<7> B_shape = state[1]
<8> if A.shape[-1] == B_shape[0]:
<9> return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
<10> else:
<11> return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)
<12>
<13>
<14> # 1. Dequantize
<15> # 2. MatmulnN
<16> output = torch.nn.functional.linear(A, F.dequantize_4bit(B, state).to(A.dtype).t(), bias)
<17>
<18> # 3. Save state
<19> ctx.state = state
<20> ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
<21>
<22> if any(ctx.needs_input_grad[:2]):
<23> ctx.tensors = (A, B)
<24> else:
<25> ctx.tensors = (None, None)
<26>
<27> return output
<28>
| |
bitsandbytes.autograd._functions/MatMul4Bit.backward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
<6>:<del> state = ctx.state
|
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
<0> if ctx.is_empty:
<1> bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
<2> return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
<3>
<4> req_gradA, _, _, req_gradBias, _= ctx.needs_input_grad
<5> A, B = ctx.tensors
<6> state = ctx.state
<7>
<8> grad_A, grad_B, grad_bias = None, None, None
<9>
<10> if req_gradBias:
<11> # compute grad_bias first before changing grad_output dtype
<12> grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
<13>
<14> # not supported by PyTorch. TODO: create work-around
<15> #if req_gradB: grad_B = torch.matmul(grad_output.t(), A)
<16> if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())
<17>
<18> return grad_A, grad_B, None, grad_bias, None
<19>
|
===========changed ref 0===========
<s>._functions
class MatMul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
+ def forward(ctx, A, B, out=None, bias=None, quant_state: F.QuantState = None):
- def forward(ctx, A, B, out=None, bias=None, state=None):
# default of pytorch behavior if inputs are empty
ctx.is_empty = False
if prod(A.shape) == 0:
ctx.is_empty = True
ctx.A = A
ctx.B = B
ctx.bias = bias
+ B_shape = quant_state.shape
- B_shape = state[1]
if A.shape[-1] == B_shape[0]:
return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
else:
return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)
# 1. Dequantize
# 2. MatmulnN
+ output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)
- output = torch.nn.functional.linear(A, F.dequantize_4bit(B, state).to(A.dtype).t(), bias)
# 3. Save state
+ ctx.state = quant_state
- ctx.state = state
ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
if any(ctx.needs_input_grad[:2]):
ctx.tensors = (A, B)
else:
ctx.tensors = (None, None)
</s>
===========changed ref 1===========
<s>Mul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
+ def forward(ctx, A, B, out=None, bias=None, quant_state: F.QuantState = None):
- def forward(ctx, A, B, out=None, bias=None, state=None):
# offset: 1
<s> ctx.tensors = (A, B)
else:
ctx.tensors = (None, None)
return output
|
bitsandbytes.autograd._functions/matmul_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
<2>:<del> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
<3>:<add> if A.shape[-1] % quant_state.blocksize != 0:
<del> if A.shape[-1] % blocksize != 0:
<4>:<add> warn(f'Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
<del> warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
<7>:<add> out = F.gemv_4bit(A, B.t(), out, quant_state=quant_state)
<del> out = F.gemv_4bit(A, B.t(), out, state=quant_state)
|
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: F.QuantState, out: tensor = None, bias=None):
- def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
<0> assert quant_state is not None
<1> if A.numel() == A.shape[-1] and A.requires_grad == False:
<2> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
<3> if A.shape[-1] % blocksize != 0:
<4> warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
<5> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<6> else:
<7> out = F.gemv_4bit(A, B.t(), out, state=quant_state)
<8> if bias is not None:
<9> out += bias
<10> return out
<11> else:
<12> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<13>
|
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
if ctx.is_empty:
bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
req_gradA, _, _, req_gradBias, _= ctx.needs_input_grad
A, B = ctx.tensors
- state = ctx.state
grad_A, grad_B, grad_bias = None, None, None
if req_gradBias:
# compute grad_bias first before changing grad_output dtype
grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
# not supported by PyTorch. TODO: create work-around
#if req_gradB: grad_B = torch.matmul(grad_output.t(), A)
if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())
return grad_A, grad_B, None, grad_bias, None
===========changed ref 1===========
<s>._functions
class MatMul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
+ def forward(ctx, A, B, out=None, bias=None, quant_state: F.QuantState = None):
- def forward(ctx, A, B, out=None, bias=None, state=None):
# default of pytorch behavior if inputs are empty
ctx.is_empty = False
if prod(A.shape) == 0:
ctx.is_empty = True
ctx.A = A
ctx.B = B
ctx.bias = bias
+ B_shape = quant_state.shape
- B_shape = state[1]
if A.shape[-1] == B_shape[0]:
return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
else:
return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)
# 1. Dequantize
# 2. MatmulnN
+ output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)
- output = torch.nn.functional.linear(A, F.dequantize_4bit(B, state).to(A.dtype).t(), bias)
# 3. Save state
+ ctx.state = quant_state
- ctx.state = state
ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
if any(ctx.needs_input_grad[:2]):
ctx.tensors = (A, B)
else:
ctx.tensors = (None, None)
</s>
===========changed ref 2===========
<s>Mul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
+ def forward(ctx, A, B, out=None, bias=None, quant_state: F.QuantState = None):
- def forward(ctx, A, B, out=None, bias=None, state=None):
# offset: 1
<s> ctx.tensors = (A, B)
else:
ctx.tensors = (None, None)
return output
|
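With the `QuantState` refactor above, `matmul_4bit` reads fields by name (`quant_state.blocksize`, `.shape`) instead of unpacking a list. A minimal inference-path sketch, assuming a square hypothetical weight (so layout details do not matter) and a single-token input that takes the `gemv_4bit` branch:

import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

W = torch.randn(4096, 4096, device="cuda", dtype=torch.float16)
qW, quant_state = F.quantize_4bit(W, quant_type="nf4")
x = torch.randn(1, 4096, device="cuda", dtype=torch.float16)   # numel == last dim -> gemv path
out = bnb.matmul_4bit(x, qW.t(), quant_state=quant_state)
print(out.shape, quant_state.blocksize, quant_state.quant_type)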
bitsandbytes.nn.modules/Params4bit.to
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
<5>:<add> if self.quant_state is not None:
<add> self.quant_state.to(device)
<del> s = self.quant_state
<6>:<del> if s is not None:
<7>:<del> # make sure the quantization state is on the right device
<8>:<del> s[0] = s[0].to(device)
<9>:<del> if self.compress_statistics:
<10>:<del> # TODO: refactor this. This is a nightmare
<11>:<del> # for 4-bit:
<12>:<del> # state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]
<13>:<del> # state2 = [absmax, input_shape, A.dtype, blocksize, None, quant_type]
<14>:<del> #s[-2][0] = s[-2][0].to(device) # offset
<15>:<del> #s[-2][1][0] = s[-2][1][0].to(device) # nested absmax
<17>:<del> # for 8-bit
<18>:<del> s[-3][0] = s[-3][0].to(device) # offset
<19>:<del> s[-3][1][0] = s[-3][1][0].to(device) # nested quantiation state statitics
<20>:<del> s[-3][1][1] = s[-3][1][1].to(device) # nested quantiation codebook
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
<0> device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
<1>
<2> if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
<3> return self.cuda(device)
<4> else:
<5> s = self.quant_state
<6> if s is not None:
<7> # make sure the quantization state is on the right device
<8> s[0] = s[0].to(device)
<9> if self.compress_statistics:
<10> # TODO: refactor this. This is a nightmare
<11> # for 4-bit:
<12> # state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]
<13> # state2 = [absmax, input_shape, A.dtype, blocksize, None, quant_type]
<14> #s[-2][0] = s[-2][0].to(device) # offset
<15> #s[-2][1][0] = s[-2][1][0].to(device) # nested absmax
<16>
<17> # for 8-bit
<18> s[-3][0] = s[-3][0].to(device) # offset
<19> s[-3][1][0] = s[-3][1][0].to(device) # nested quantiation state statitics
<20> s[-3][1][1] = s[-3][1][1].to(device) # nested quantiation codebook
<21> new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
<22> requires_grad=self.requires_grad, quant_state=self.quant_state,
<23> blocksize=self.blocksize, compress_statistics=self.compress_statistics,
<24> quant_type=self.quant_type)
<25>
<26> return new_param
<27>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.modules
Params4bit(data: Tensor=..., requires_grad: builtins.bool=...)
at: bitsandbytes.nn.modules.Linear4bit.forward
self.compute_type_is_set = True
at: bitsandbytes.nn.modules.Params4bit
cuda(device)
at: bitsandbytes.nn.modules.Params4bit.cuda
self.data = w_4bit
self.quant_state = quant_state
at: torch._C
float32: dtype = ...
bfloat16: dtype = ...
at: torch._tensor.Tensor.__setstate__
self.requires_grad, _, self._backward_hooks = state
at: torch.nn.modules.linear
Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None)
at: torch.nn.modules.linear.Linear
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
__init__(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
__init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
if ctx.is_empty:
bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
req_gradA, _, _, req_gradBias, _= ctx.needs_input_grad
A, B = ctx.tensors
- state = ctx.state
grad_A, grad_B, grad_bias = None, None, None
if req_gradBias:
# compute grad_bias first before changing grad_output dtype
grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
# not supported by PyTorch. TODO: create work-around
#if req_gradB: grad_B = torch.matmul(grad_output.t(), A)
if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())
return grad_A, grad_B, None, grad_bias, None
===========changed ref 1===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: F.QuantState, out: tensor = None, bias=None):
- def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
- absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
+ if A.shape[-1] % quant_state.blocksize != 0:
- if A.shape[-1] % blocksize != 0:
+ warn(f'Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
- warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
return MatMul4Bit.apply(A, B, out, bias, quant_state)
else:
+ out = F.gemv_4bit(A, B.t(), out, quant_state=quant_state)
- out = F.gemv_4bit(A, B.t(), out, state=quant_state)
if bias is not None:
out += bias
return out
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
===========changed ref 2===========
<s>._functions
class MatMul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
+ def forward(ctx, A, B, out=None, bias=None, quant_state: F.QuantState = None):
- def forward(ctx, A, B, out=None, bias=None, state=None):
# default of pytorch behavior if inputs are empty
ctx.is_empty = False
if prod(A.shape) == 0:
ctx.is_empty = True
ctx.A = A
ctx.B = B
ctx.bias = bias
+ B_shape = quant_state.shape
- B_shape = state[1]
if A.shape[-1] == B_shape[0]:
return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
else:
return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)
# 1. Dequantize
# 2. MatmulnN
+ output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)
- output = torch.nn.functional.linear(A, F.dequantize_4bit(B, state).to(A.dtype).t(), bias)
# 3. Save state
+ ctx.state = quant_state
- ctx.state = state
ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
if any(ctx.needs_input_grad[:2]):
ctx.tensors = (A, B)
else:
ctx.tensors = (None, None)
</s>
===========changed ref 3===========
<s>Mul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
+ def forward(ctx, A, B, out=None, bias=None, quant_state: F.QuantState = None):
- def forward(ctx, A, B, out=None, bias=None, state=None):
# offset: 1
<s> ctx.tensors = (A, B)
else:
ctx.tensors = (None, None)
return output
|
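After the refactor above, moving a 4-bit parameter between devices delegates to `QuantState.to`, which also relocates the nested statistics. A small sketch, assuming arbitrary layer sizes and that the refactored quantizer stores the nested statistics in `state2` (as `QuantState.to` above implies):

import torch
import bitsandbytes as bnb

layer = bnb.nn.LinearNF4(1024, 1024, compress_statistics=True)
layer = layer.cuda()                 # the weight is quantized on the GPU here
qs = layer.weight.quant_state
print(qs.absmax.device)              # cuda:0
print(qs.state2.absmax.device)       # nested absmax lives on the same device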
bitsandbytes.functional/quantize_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
<0> """
<1> Quantize tensor A in blocks of size 4096 values.
<2>
<3> Quantizes tensor A by dividing it into blocks of 4096 values.
<4> Then the absolute maximum value within these blocks is calculated
<5> for the non-linear quantization.
<6>
<7> Parameters
<8> ----------
<9> A : torch.Tensor
<10> The input tensor.
<11> code : torch.Tensor
<12> The quantization map.
<13> absmax : torch.Tensor
<14> The absmax values.
<15> out : torch.Tensor
<16> The output tensor (8-bit).
<17>
<18> Returns
<19> -------
<20> torch.Tensor:
<21> The 8-bit tensor.
<22> tuple(torch.Tensor, torch.Tensor):
<23> The quantization state to undo the quantization.
<24> """
<25>
<26>
<27> if code is None:
<28> if "dynamic" not in name2qmap:
<29> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<30> code = name2qmap["dynamic"]
<31>
<32> if absmax is None:
<33> n = A.numel()
<34> blocks = n // blocksize
<35> blocks += 1 if n % blocksize > 0 else 0
<36> absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
<37>
<38> if out is None:
<39> out = torch.zeros_like(A, dtype=torch.uint8)
<40>
<41> if A.device.type != 'cpu':
<42> assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
<43> cblocksize = ct.c_int32(blocksize)
<44> prev_device = pre_call(A.device)
<45> code = code.to(A.device)
<46> is_on_gpu([code, A, out, abs</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
# offset: 1
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.bfloat16:
lib.cquantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
if nested:
offset = absmax.mean()
absmax -= offset
qabsmax, state2 = quantize_blockwise(absmax, blocksize=blocksize, nested=False)
state = [qabsmax, code, blocksize, nested, A.dtype, offset, state2]
else:
state = [absmax, code, blocksize, nested, A.dtype, None, None]
return out, state
===========changed ref 0===========
# module: bitsandbytes.functional
+ class QuantState:
+ def __init__(self, absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None):
+ self.absmax = absmax
+ self.shape = shape
+ self.code = code
+ self.dtype = dtype
+ self.blocksize = blocksize
+ self.quant_type = quant_type
+ self.offset = offset
+ self.state2 = state2
+ self.nested = state2 is not None
+
===========changed ref 1===========
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
if ctx.is_empty:
bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
req_gradA, _, _, req_gradBias, _= ctx.needs_input_grad
A, B = ctx.tensors
- state = ctx.state
grad_A, grad_B, grad_bias = None, None, None
if req_gradBias:
# compute grad_bias first before changing grad_output dtype
grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
# not supported by PyTorch. TODO: create work-around
#if req_gradB: grad_B = torch.matmul(grad_output.t(), A)
if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())
return grad_A, grad_B, None, grad_bias, None
===========changed ref 2===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: F.QuantState, out: tensor = None, bias=None):
- def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
- absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
+ if A.shape[-1] % quant_state.blocksize != 0:
- if A.shape[-1] % blocksize != 0:
+ warn(f'Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
- warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
return MatMul4Bit.apply(A, B, out, bias, quant_state)
else:
+ out = F.gemv_4bit(A, B.t(), out, quant_state=quant_state)
- out = F.gemv_4bit(A, B.t(), out, state=quant_state)
if bias is not None:
out += bias
return out
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
===========changed ref 3===========
<s>._functions
class MatMul4Bit(torch.autograd.Function):
# forward is the same, but we added the fallback for pre-turing GPUs
# backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
@staticmethod
+ def forward(ctx, A, B, out=None, bias=None, quant_state: F.QuantState = None):
- def forward(ctx, A, B, out=None, bias=None, state=None):
# default of pytorch behavior if inputs are empty
ctx.is_empty = False
if prod(A.shape) == 0:
ctx.is_empty = True
ctx.A = A
ctx.B = B
ctx.bias = bias
+ B_shape = quant_state.shape
- B_shape = state[1]
if A.shape[-1] == B_shape[0]:
return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
else:
return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)
# 1. Dequantize
# 2. MatmulnN
+ output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)
- output = torch.nn.functional.linear(A, F.dequantize_4bit(B, state).to(A.dtype).t(), bias)
# 3. Save state
+ ctx.state = quant_state
- ctx.state = state
ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
if any(ctx.needs_input_grad[:2]):
ctx.tensors = (A, B)
else:
ctx.tensors = (None, None)
</s>
|
|
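The record above switches `quantize_blockwise` to returning a `QuantState`. A short round-trip sketch exercising the nested path (tensor size and blocksize are illustrative):

import torch
import bitsandbytes.functional as F

A = torch.randn(4096, 4096, device="cuda")
C, qs = F.quantize_blockwise(A, blocksize=2048, nested=True)
# Fields are now attributes instead of list positions.
print(qs.blocksize, qs.nested, qs.dtype)      # 2048 True torch.float32
A2 = F.dequantize_blockwise(C, qs)
print((A - A2).abs().mean().item())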
bitsandbytes.functional/dequantize_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
<10>:<add> quant_state : QuantState
<add> Object with code, absmax and other quantization state components.
<del> quant_state : tuple(torch.Tensor, torch.Tensor)
<11>:<del> Tuple of code and absmax values.
<32>:<del> quant_state = (absmax, code, blocksize, False, torch.float32, None, None)
<33>:<del>
<34>:<del> absmax, code, blocksize, nested, dtype, offset, state2 = quant_state
<35>:<del>
<36>:<del> if nested:
<37>:<add> quant_state = QuantState(absmax=absmax, code=code, blocksize=blocksize, dtype=torch.float32)
<add>
<add> absmax = quant_state.absmax
<add> if quant_state.nested:
<add> absmax = dequantize_blockwise(quant_state.absmax, quant_state.state2)
<del> absmax = dequantize_blockwise(absmax, state2)
<38>:<add> absmax += quant_state.offset
<del> absmax += offset
<42>:<add> out = torch.empty(A.shape, dtype=quant_state.dtype, device=A.device)
<del> out = torch.empty(A.shape, dtype=dtype, device
|
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
+ quant_state: QuantState = None,
- quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
<0> """
<1> Dequantizes blockwise quantized values.
<2>
<3> Dequantizes the tensor A with maximum absolute values absmax in
<4> blocks of size 4096.
<5>
<6> Parameters
<7> ----------
<8> A : torch.Tensor
<9> The input 8-bit tensor.
<10> quant_state : tuple(torch.Tensor, torch.Tensor)
<11> Tuple of code and absmax values.
<12> absmax : torch.Tensor
<13> The absmax values.
<14> code : torch.Tensor
<15> The quantization map.
<16> out : torch.Tensor
<17> Dequantized output tensor (default: float32)
<18>
<19>
<20> Returns
<21> -------
<22> torch.Tensor:
<23> Dequantized tensor (default: float32)
<24> """
<25> assert quant_state is not None or absmax is not None
<26> if code is None and quant_state is None:
<27> if "dynamic" not in name2qmap:
<28> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<29> code = name2qmap["dynamic"]
<30>
<31> if quant_state is None:
<32> quant_state = (absmax, code, blocksize, False, torch.float32, None, None)
<33>
<34> absmax, code, blocksize, nested, dtype, offset, state2 = quant_state
<35>
<36> if nested:
<37> absmax = dequantize_blockwise(absmax, state2)
<38> absmax += offset
<39> if absmax.dtype != torch.float32: absmax = absmax.float()
<40>
<41> if out is None:
<42> out = torch.empty(A.shape, dtype=dtype, device</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
+ quant_state: QuantState = None,
- quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 1
if A.device.type != 'cpu':
device = pre_call(A.device)
code = code.to(A.device)
if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.float16:
lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
elif out.dtype == torch.bfloat16:
lib.cdequantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def dequantize_blockwise(
A: Tensor,
+ quant_state: QuantState = None,
- quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
blocksize: int = 4096,
nested=False
) -> Tensor:
# offset: 2
<s> code = code.cpu()
lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
return out
===========changed ref 0===========
# module: bitsandbytes.functional
+ class QuantState:
+ def __init__(self, absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None):
+ self.absmax = absmax
+ self.shape = shape
+ self.code = code
+ self.dtype = dtype
+ self.blocksize = blocksize
+ self.quant_type = quant_type
+ self.offset = offset
+ self.state2 = state2
+ self.nested = state2 is not None
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ class QuantState:
+ def to(self, device):
+ # make sure the quantization state is on the right device
+ self.absmax = self.absmax.to(device)
+ if self.nested:
+ self.offset = self.offset.to(device)
+ self.state2.absmax = self.state2.absmax.to(device)
+ self.state2.code = self.state2.code.to(device)
+
===========changed ref 2===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
"""
Quantize tensor A in blocks of size 4096 values.
Quantizes tensor A by dividing it into blocks of 4096 values.
Then the absolute maximum value within these blocks is calculated
for the non-linear quantization.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
absmax : torch.Tensor
The absmax values.
out : torch.Tensor
The output tensor (8-bit).
Returns
-------
torch.Tensor:
The 8-bit tensor.
tuple(torch.Tensor, torch.Tensor):
The quantization state to undo the quantization.
"""
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if absmax is None:
n = A.numel()
blocks = n // blocksize
blocks += 1 if n % blocksize > 0 else 0
absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
if out is None:
out = torch.zeros_like(A, dtype=torch.uint8)
if A.device.type != 'cpu':
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
cblocksize = ct.c_int32(blocksize)
prev_device = pre_call(A.device)
code = code.to(A.device)
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax),</s>
===========changed ref 3===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
# offset: 1
<s>.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.bfloat16:
lib.cquantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
if nested:
offset = absmax.mean()
absmax -= offset
qabsmax, state2 = quantize_blockwise(absmax, blocksize=blocksize, nested=False)
+ quant_state = QuantState(absmax=qabsmax, code=code, blocksize=blocksize, dtype=A.dtype, offset=offset, state2=state2)
- state = [qabsmax, code, blocksize, nested, A.dtype</s>
|
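As the code above shows, `dequantize_blockwise` still accepts raw `absmax`/`code` and wraps them into a `QuantState` internally (with a float32 output dtype). A sketch of that fallback path, with an illustrative tensor size:

import torch
import bitsandbytes.functional as F

A = torch.randn(1024, 1024, device="cuda")
code = F.create_dynamic_map().to(A.device)
C, qs = F.quantize_blockwise(A, code=code, blocksize=4096)
# Passing the pieces directly instead of the QuantState object:
A2 = F.dequantize_blockwise(C, absmax=qs.absmax, code=code, blocksize=4096)
print((A - A2).abs().mean().item())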
bitsandbytes.functional/quantize_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
<0> """
<1> Quantize tensor A in blocks of 4-bit values.
<2>
<3> Quantizes tensor A by dividing it into blocks which are independently quantized to FP4.
<4>
<5> Parameters
<6> ----------
<7> A : torch.Tensor
<8> The input tensor.
<9> absmax : torch.Tensor
<10> The absmax values.
<11> out : torch.Tensor
<12> The output tensor (8-bit).
<13> blocksize : int
<14> The blocksize used in quantization.
<15> quant_type : str
<16> The 4-bit quantization data type {fp4, nf4}
<17>
<18> Returns
<19> -------
<20> torch.Tensor:
<21> The 8-bit tensor with packed 4-bit values.
<22> tuple(torch.Tensor, torch.Size, torch.dtype, int):
<23> The quantization state to undo the quantization.
<24> """
<25> if A.device.type != 'cuda':
<26> raise NotImplementedError(f'Device type not supported for FP4 quantization: {A.device.type}')
<27> if quant_type not in ['fp4', 'nf4']:
<28> raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
<29>
<30> n = A.numel()
<31> input_shape = A.shape
<32>
<33> if absmax is None:
<34> blocks = n // blocksize
<35> blocks += 1 if n % blocksize > 0 else 0
<36> absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
<37>
<38>
<39> if out is None:
<40> out = torch.zeros(((n+1)//2, 1), dtype=torch.uint8, device=A.device)
<41>
<42> assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
<43>
</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
# offset: 1
is_on_gpu([A, out, absmax])
if A.dtype == torch.float32:
if quant_type == 'fp4':
lib.cquantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
elif A.dtype == torch.float16:
if quant_type == 'fp4':
lib.cquantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
elif A.dtype == torch.bfloat16:
if quant_type == 'fp4':
lib.cquantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
# offset: 2
<s>None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
datatype = get_4bit_type(quant_type, device=A.device)
if compress_statistics:
offset = absmax.mean()
absmax -= offset
qabsmax, state2 = quantize_blockwise(absmax, blocksize=256)
del absmax
state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type, datatype]
else:
state = [absmax, input_shape, A.dtype, blocksize, None, quant_type, datatype]
return out, state
===========changed ref 0===========
# module: bitsandbytes.functional
+ class QuantState:
+ def __init__(self, absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None):
+ self.absmax = absmax
+ self.shape = shape
+ self.code = code
+ self.dtype = dtype
+ self.blocksize = blocksize
+ self.quant_type = quant_type
+ self.offset = offset
+ self.state2 = state2
+ self.nested = state2 is not None
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ class QuantState:
+ def to(self, device):
+ # make sure the quantization state is on the right device
+ self.absmax = self.absmax.to(device)
+ if self.nested:
+ self.offset = self.offset.to(device)
+ self.state2.absmax = self.state2.absmax.to(device)
+ self.state2.code = self.state2.code.to(device)
+
===========changed ref 2===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
"""
Quantize tensor A in blocks of size 4096 values.
Quantizes tensor A by dividing it into blocks of 4096 values.
Then the absolute maximum value within these blocks is calculated
for the non-linear quantization.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
absmax : torch.Tensor
The absmax values.
out : torch.Tensor
The output tensor (8-bit).
Returns
-------
torch.Tensor:
The 8-bit tensor.
tuple(torch.Tensor, torch.Tensor):
The quantization state to undo the quantization.
"""
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if absmax is None:
n = A.numel()
blocks = n // blocksize
blocks += 1 if n % blocksize > 0 else 0
absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
if out is None:
out = torch.zeros_like(A, dtype=torch.uint8)
if A.device.type != 'cpu':
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
cblocksize = ct.c_int32(blocksize)
prev_device = pre_call(A.device)
code = code.to(A.device)
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax),</s>
|
|
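The commit above migrates `quantize_4bit` to the same `QuantState` container. A minimal quantize/dequantize round trip, assuming a CUDA device and arbitrary sizes:

import torch
import bitsandbytes.functional as F

W = torch.randn(1024, 1024, device="cuda", dtype=torch.bfloat16)
qW, qs = F.quantize_4bit(W, blocksize=64, quant_type="nf4")
# qW packs two 4-bit values per byte; qs.shape/qs.dtype record the original layout.
print(qW.shape, qs.shape, qs.dtype, qs.quant_type)
W2 = F.dequantize_4bit(qW, qs)
print((W - W2).abs().float().mean().item())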
bitsandbytes.functional/dequantize_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
<9>:<add> quant_state : QuantState
<del> quant_state : tuple(torch.Tensor, torch.Size, torch.dtype)
<10>:<add> object with quantisation stats, incl. absmax values, original tensor shape and original dtype.
<del> Tuple of absmax values, original tensor shape and original dtype.
<33>:<add>
<add> quant_state = QuantState(absmax=absmax, shape=out.shape, dtype=out.dtype, blocksize=blocksize, quant_type=quant_type)
<add>
<del> shape = out.shape
<34>:<del> dtype = out.dtype
<36>:<add> absmax = quant_state.absmax
<del> absmax, shape, dtype, blocksize, compressed_stats, quant_
|
# module: bitsandbytes.functional
+ def dequantize_4bit(A: Tensor, quant_state: QuantState = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
- def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
<0> """
<1> Dequantizes FP4 blockwise quantized values.
<2>
<3> Dequantizes the tensor A with maximum absolute values absmax in blocks of size blocksize.
<4>
<5> Parameters
<6> ----------
<7> A : torch.Tensor
<8> The input 8-bit tensor (packed 4-bit values).
<9> quant_state : tuple(torch.Tensor, torch.Size, torch.dtype)
<10> Tuple of absmax values, original tensor shape and original dtype.
<11> absmax : torch.Tensor
<12> The absmax values.
<13> out : torch.Tensor
<14> Dequantized output tensor.
<15> blocksize : int
<16> The blocksize used in quantization.
<17> quant_type : str
<18> The 4-bit quantization data type {fp4, nf4}
<19>
<20>
<21> Returns
<22> -------
<23> torch.Tensor:
<24> Dequantized tensor.
<25> """
<26> if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
<27> raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
<28> if quant_type not in ['fp4', 'nf4']:
<29> raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
<30>
<31> if quant_state is None:
<32> assert absmax is not None and out is not None
<33> shape = out.shape
<34> dtype = out.dtype
<35> else:
<36> absmax, shape, dtype, blocksize, compressed_stats, quant_</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
+ def dequantize_4bit(A: Tensor, quant_state: QuantState = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
- def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
# offset: 1
if compressed_stats is not None:
offset, state2 = compressed_stats
absmax = dequantize_blockwise(absmax, state2)
absmax += offset
if absmax.dtype != torch.float32: absmax = absmax.float()
if out is None:
out = torch.empty(shape, dtype=dtype, device=A.device)
n = out.numel()
device = pre_call(A.device)
is_on_gpu([A, absmax, out])
if out.dtype == torch.float32:
if quant_type == 'fp4':
lib.cdequantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
elif out.dtype == torch.float16:
if quant_type == 'fp4':
lib.cdequantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_fp16_nf4(</s>
===========below chunk 1===========
# module: bitsandbytes.functional
+ def dequantize_4bit(A: Tensor, quant_state: QuantState = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
- def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
# offset: 2
<s>.c_int(n))
else:
lib.cdequantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
elif out.dtype == torch.bfloat16:
if quant_type == 'fp4':
lib.cdequantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
lib.cdequantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
is_transposed = (True if A.shape[0] == 1 else False)
if is_transposed: return out.t()
else: return out
===========changed ref 0===========
# module: bitsandbytes.functional
+ class QuantState:
+ def __init__(self, absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None):
+ self.absmax = absmax
+ self.shape = shape
+ self.code = code
+ self.dtype = dtype
+ self.blocksize = blocksize
+ self.quant_type = quant_type
+ self.offset = offset
+ self.state2 = state2
+ self.nested = state2 is not None
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ class QuantState:
+ def to(self, device):
+ # make sure the quantization state is on the right device
+ self.absmax = self.absmax.to(device)
+ if self.nested:
+ self.offset = self.offset.to(device)
+ self.state2.absmax = self.state2.absmax.to(device)
+ self.state2.code = self.state2.code.to(device)
+
===========changed ref 2===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
"""
Quantize tensor A in blocks of size 4096 values.
Quantizes tensor A by dividing it into blocks of 4096 values.
Then the absolute maximum value within these blocks is calculated
for the non-linear quantization.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
absmax : torch.Tensor
The absmax values.
out : torch.Tensor
The output tensor (8-bit).
Returns
-------
torch.Tensor:
The 8-bit tensor.
tuple(torch.Tensor, torch.Tensor):
The quantization state to undo the quantization.
"""
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if absmax is None:
n = A.numel()
blocks = n // blocksize
blocks += 1 if n % blocksize > 0 else 0
absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
if out is None:
out = torch.zeros_like(A, dtype=torch.uint8)
if A.device.type != 'cpu':
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
cblocksize = ct.c_int32(blocksize)
prev_device = pre_call(A.device)
code = code.to(A.device)
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax),</s>
|
bitsandbytes.functional/dequantize
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
<0>:<add> assert state is not None or absmax is not None
<del> assert quant_state is not None or absmax is not None
<1>:<add> if code is None and state is None:
<del> if code is None and quant_state is None:
<7>:<add> if state is None:
<del> if quant_state is None:
<8>:<add> state = (absmax, code)
<del> quant_state = (absmax, code)
<9>:<add> out = dequantize_no_absmax(A, state[1], out)
<del> out = dequantize_no_absmax(A, quant_state[1], out)
<10>:<add> return out * state[0]
<del> return out * quant_state[0]
|
# module: bitsandbytes.functional
def dequantize(
A: Tensor,
+ state: Tuple[Tensor, Tensor] = None,
- quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
) -> Tensor:
<0> assert quant_state is not None or absmax is not None
<1> if code is None and quant_state is None:
<2> if "dynamic" not in name2qmap:
<3> name2qmap["dynamic"] = create_dynamic_map().to(A.device)
<4> code = name2qmap["dynamic"]
<5> code = code.to(A.device)
<6>
<7> if quant_state is None:
<8> quant_state = (absmax, code)
<9> out = dequantize_no_absmax(A, quant_state[1], out)
<10> return out * quant_state[0]
<11>
|
===========changed ref 0===========
# module: bitsandbytes.functional
+ class QuantState:
+ def __init__(self, absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None):
+ self.absmax = absmax
+ self.shape = shape
+ self.code = code
+ self.dtype = dtype
+ self.blocksize = blocksize
+ self.quant_type = quant_type
+ self.offset = offset
+ self.state2 = state2
+ self.nested = state2 is not None
+
===========changed ref 1===========
# module: bitsandbytes.functional
+ class QuantState:
+ def to(self, device):
+ # make sure the quantization state is on the right device
+ self.absmax = self.absmax.to(device)
+ if self.nested:
+ self.offset = self.offset.to(device)
+ self.state2.absmax = self.state2.absmax.to(device)
+ self.state2.code = self.state2.code.to(device)
+
===========changed ref 2===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
"""
Quantize tensor A in blocks of size 4096 values.
Quantizes tensor A by dividing it into blocks of 4096 values.
Then the absolute maximum value within these blocks is calculated
for the non-linear quantization.
Parameters
----------
A : torch.Tensor
The input tensor.
code : torch.Tensor
The quantization map.
absmax : torch.Tensor
The absmax values.
out : torch.Tensor
The output tensor (8-bit).
Returns
-------
torch.Tensor:
The 8-bit tensor.
tuple(torch.Tensor, torch.Tensor):
The quantization state to undo the quantization.
"""
if code is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
if absmax is None:
n = A.numel()
blocks = n // blocksize
blocks += 1 if n % blocksize > 0 else 0
absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
if out is None:
out = torch.zeros_like(A, dtype=torch.uint8)
if A.device.type != 'cpu':
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
cblocksize = ct.c_int32(blocksize)
prev_device = pre_call(A.device)
code = code.to(A.device)
is_on_gpu([code, A, out, absmax])
if A.dtype == torch.float32:
lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax),</s>
===========changed ref 3===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
# offset: 1
<s>.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.float16:
lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
elif A.dtype == torch.bfloat16:
lib.cquantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
else:
# cpu
code = code.cpu()
lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))
if nested:
offset = absmax.mean()
absmax -= offset
qabsmax, state2 = quantize_blockwise(absmax, blocksize=blocksize, nested=False)
+ quant_state = QuantState(absmax=qabsmax, code=code, blocksize=blocksize, dtype=A.dtype, offset=offset, state2=state2)
- state = [qabsmax, code, blocksize, nested, A.dtype</s>
===========changed ref 4===========
# module: bitsandbytes.functional
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
# offset: 2
<s>, state2]
else:
+ quant_state = QuantState(absmax=absmax, code=code, blocksize=blocksize, dtype=A.dtype)
- state = [absmax, code, blocksize, nested, A.dtype, None, None]
+ return out, quant_state
- return out, state
===========changed ref 5===========
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
if ctx.is_empty:
bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
req_gradA, _, _, req_gradBias, _= ctx.needs_input_grad
A, B = ctx.tensors
- state = ctx.state
grad_A, grad_B, grad_bias = None, None, None
if req_gradBias:
# compute grad_bias first before changing grad_output dtype
grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
# not supported by PyTorch. TODO: create work-around
#if req_gradB: grad_B = torch.matmul(grad_output.t(), A)
if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())
return grad_A, grad_B, None, grad_bias, None
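The dequantize path above reduces to an index lookup into the code table followed by scaling with absmax. A rough pure-PyTorch equivalent, illustrative only and ignoring the CUDA kernels and the blockwise layout:

import torch

def dequantize_reference(A_uint8: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor) -> torch.Tensor:
    # each 8-bit value indexes the 256-entry quantization map, then gets rescaled by absmax
    return code[A_uint8.long()] * absmax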
|
bitsandbytes.functional/gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
<2>:<add> if quant_state is None:
<del> if state is None:
<8>:<add> Bshape = quant_state.shape
<del> Bshape = state[1]
<10>:<del> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = state
<11>:<del> if compressed_stats is not None:
<12>:<del> offset, state2 = compressed_stats
<13>:<add> absmax = quant_state.absmax
<add> if quant_state.nested:
<add> absmax = dequantize_blockwise(quant_state.absmax, quant_state.state2)
<del> absmax = dequantize_blockwise(absmax, state2)
<14>:<add> absmax += quant_state.offset
<del> absmax += offset
<28>:<add> is_on_gpu([B, A, out, absmax, quant_state.code])
<del> is_on_gpu([B, A, out, absmax, state[-1]])
|
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
+ quant_state=None
- state=None
):
<0> prev_device = pre_call(A.device)
<1> #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
<2> if state is None:
<3> raise ValueError(f'state cannot be None. gemv_4bit( ) requires the state from quantize_4bit( )')
<4>
<5> if A.numel() != A.shape[-1]:
<6> raise ValueError(f'Dimensions of A are invalid. Must be a vector with the leading dimensions of "1", e.g. [1, 1, 2048]')
<7>
<8> Bshape = state[1]
<9> bout = Bshape[0]
<10> absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = state
<11> if compressed_stats is not None:
<12> offset, state2 = compressed_stats
<13> absmax = dequantize_blockwise(absmax, state2)
<14> absmax += offset
<15>
<16> if out is None:
<17> if len(A.shape) == 3:
<18> out = torch.empty(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
<19> else:
<20> out = torch.empty(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<21>
<22> n = 1
<23> m = Bshape[0]
<24> k = Bshape[1]
<25> lda = Bshape[0]
<26> ldc = Bshape[0]
<27> ldb = (A.shape[-1]+1)//2
<28> is_on_gpu([B, A, out, absmax, state[-1]])
<29> m = ct.c_int32(m</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
+ quant_state=None
- state=None
):
# offset: 1
n = ct.c_int32(n)
k = ct.c_int32(k)
lda = ct.c_int32(lda)
ldb = ct.c_int32(ldb)
ldc = ct.c_int32(ldc)
if B.dtype == torch.uint8:
if A.dtype == torch.float16:
lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
elif A.dtype == torch.bfloat16:
lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
elif A.dtype == torch.float32:
lib.cgemm_4bit_inference_naive_fp32(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
post_call(prev_device)
return out
===========changed ref 0===========
# module: bitsandbytes.functional
def dequantize(
A: Tensor,
+ state: Tuple[Tensor, Tensor] = None,
- quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
) -> Tensor:
+ assert state is not None or absmax is not None
- assert quant_state is not None or absmax is not None
+ if code is None and state is None:
- if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
code = code.to(A.device)
+ if state is None:
- if quant_state is None:
+ state = (absmax, code)
- quant_state = (absmax, code)
+ out = dequantize_no_absmax(A, state[1], out)
- out = dequantize_no_absmax(A, quant_state[1], out)
+ return out * state[0]
- return out * quant_state[0]
===========changed ref 1===========
# module: bitsandbytes.functional
+ class QuantState:
+ def __init__(self, absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None):
+ self.absmax = absmax
+ self.shape = shape
+ self.code = code
+ self.dtype = dtype
+ self.blocksize = blocksize
+ self.quant_type = quant_type
+ self.offset = offset
+ self.state2 = state2
+ self.nested = state2 is not None
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ class QuantState:
+ def to(self, device):
+ # make sure the quantization state is on the right device
+ self.absmax = self.absmax.to(device)
+ if self.nested:
+ self.offset = self.offset.to(device)
+ self.state2.absmax = self.state2.absmax.to(device)
+ self.state2.code = self.state2.code.to(device)
+
===========changed ref 3===========
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
if ctx.is_empty:
bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
req_gradA, _, _, req_gradBias, _= ctx.needs_input_grad
A, B = ctx.tensors
- state = ctx.state
grad_A, grad_B, grad_bias = None, None, None
if req_gradBias:
# compute grad_bias first before changing grad_output dtype
grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
# not supported by PyTorch. TODO: create work-around
#if req_gradB: grad_B = torch.matmul(grad_output.t(), A)
if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())
return grad_A, grad_B, None, grad_bias, None
===========changed ref 4===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: F.QuantState, out: tensor = None, bias=None):
- def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
- absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
+ if A.shape[-1] % quant_state.blocksize != 0:
- if A.shape[-1] % blocksize != 0:
+ warn(f'Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
- warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
return MatMul4Bit.apply(A, B, out, bias, quant_state)
else:
+ out = F.gemv_4bit(A, B.t(), out, quant_state=quant_state)
- out = F.gemv_4bit(A, B.t(), out, state=quant_state)
if bias is not None:
out += bias
return out
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
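With the keyword renamed to quant_state, the inference fast path can be exercised roughly as follows (a sketch assuming a CUDA device; the single-row, no-grad A is what routes matmul_4bit into gemv_4bit):

import math
import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

dim = 4096
A = torch.randn(1, dim, dtype=torch.float16, device='cuda')                    # single token, no grad
B = torch.randn(dim, dim, dtype=torch.float16, device='cuda') / math.sqrt(dim)

qB, quant_state = F.quantize_4bit(B, quant_type='nf4')
out_gemv = F.gemv_4bit(A, qB.t(), quant_state=quant_state)   # inference kernel directly
out_mm = bnb.matmul_4bit(A, qB.t(), quant_state)             # dispatches to the same fast path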
|
bitsandbytes.functional/mm_dequant
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
<2>:<add> out_shape = state[0]
<del> out_shape = quant_state[0]
|
# module: bitsandbytes.functional
def mm_dequant(
A,
+ state,
- quant_state,
row_stats,
col_stats,
out=None,
new_row_stats=None,
new_col_stats=None,
bias=None
):
<0> assert A.dtype == torch.int32
<1> if bias is not None: assert bias.dtype == torch.float16
<2> out_shape = quant_state[0]
<3> if len(out_shape) == 3:
<4> out_shape = (out_shape[0] * out_shape[1], out_shape[2])
<5>
<6> if out is None:
<7> out = torch.empty(out_shape, dtype=torch.float16, device=A.device)
<8> if new_row_stats is None:
<9> new_row_stats = torch.empty(
<10> out_shape[0], dtype=torch.float32, device=A.device
<11> )
<12> if new_col_stats is None:
<13> new_col_stats = torch.empty(
<14> out_shape[1], dtype=torch.float32, device=A.device
<15> )
<16> assert (
<17> new_row_stats.shape[0] == row_stats.shape[0]
<18> ), f"{new_row_stats.shape} vs {row_stats.shape}"
<19> assert (
<20> new_col_stats.shape[0] == col_stats.shape[0]
<21> ), f"{new_col_stats.shape} vs {col_stats.shape}"
<22>
<23> prev_device = pre_call(A.device)
<24> ptrA = get_ptr(A)
<25> ptrOut = get_ptr(out)
<26> ptrRowStats = get_ptr(row_stats)
<27> ptrColStats = get_ptr(col_stats)
<28> ptrNewRowStats = get_ptr(new_row_stats)
<29> ptrNewColStats = get_ptr(new_col_stats)
<30> ptrBias</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def mm_dequant(
A,
+ state,
- quant_state,
row_stats,
col_stats,
out=None,
new_row_stats=None,
new_col_stats=None,
bias=None
):
# offset: 1
numRows = ct.c_int32(out_shape[0])
numCols = ct.c_int32(out_shape[1])
is_on_gpu([A, row_stats, col_stats, out, new_row_stats, new_col_stats, bias])
lib.cdequant_mm_int32_fp16(ptrA, ptrRowStats, ptrColStats, ptrOut, ptrNewRowStats, ptrNewColStats, ptrBias, numRows, numCols)
post_call(prev_device)
return out
===========changed ref 0===========
# module: bitsandbytes.functional
def dequantize(
A: Tensor,
+ state: Tuple[Tensor, Tensor] = None,
- quant_state: Tuple[Tensor, Tensor] = None,
absmax: Tensor = None,
code: Tensor = None,
out: Tensor = None,
) -> Tensor:
+ assert state is not None or absmax is not None
- assert quant_state is not None or absmax is not None
+ if code is None and state is None:
- if code is None and quant_state is None:
if "dynamic" not in name2qmap:
name2qmap["dynamic"] = create_dynamic_map().to(A.device)
code = name2qmap["dynamic"]
code = code.to(A.device)
+ if state is None:
- if quant_state is None:
+ state = (absmax, code)
- quant_state = (absmax, code)
+ out = dequantize_no_absmax(A, state[1], out)
- out = dequantize_no_absmax(A, quant_state[1], out)
+ return out * state[0]
- return out * quant_state[0]
===========changed ref 1===========
# module: bitsandbytes.functional
+ class QuantState:
+ def __init__(self, absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None):
+ self.absmax = absmax
+ self.shape = shape
+ self.code = code
+ self.dtype = dtype
+ self.blocksize = blocksize
+ self.quant_type = quant_type
+ self.offset = offset
+ self.state2 = state2
+ self.nested = state2 is not None
+
===========changed ref 2===========
# module: bitsandbytes.functional
+ class QuantState:
+ def to(self, device):
+ # make sure the quantization state is on the right device
+ self.absmax = self.absmax.to(device)
+ if self.nested:
+ self.offset = self.offset.to(device)
+ self.state2.absmax = self.state2.absmax.to(device)
+ self.state2.code = self.state2.code.to(device)
+
===========changed ref 3===========
# module: bitsandbytes.autograd._functions
class MatMul4Bit(torch.autograd.Function):
@staticmethod
def backward(ctx, grad_output):
if ctx.is_empty:
bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
req_gradA, _, _, req_gradBias, _= ctx.needs_input_grad
A, B = ctx.tensors
- state = ctx.state
grad_A, grad_B, grad_bias = None, None, None
if req_gradBias:
# compute grad_bias first before changing grad_output dtype
grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)
# not supported by PyTorch. TODO: create work-around
#if req_gradB: grad_B = torch.matmul(grad_output.t(), A)
if req_gradA: grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())
return grad_A, grad_B, None, grad_bias, None
===========changed ref 4===========
# module: bitsandbytes.autograd._functions
+ def matmul_4bit(A: tensor, B: tensor, quant_state: F.QuantState, out: tensor = None, bias=None):
- def matmul_4bit(A: tensor, B: tensor, quant_state: List, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
- absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state
+ if A.shape[-1] % quant_state.blocksize != 0:
- if A.shape[-1] % blocksize != 0:
+ warn(f'Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
- warn(f'Some matrices hidden dimension is not a multiple of {blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
return MatMul4Bit.apply(A, B, out, bias, quant_state)
else:
+ out = F.gemv_4bit(A, B.t(), out, quant_state=quant_state)
- out = F.gemv_4bit(A, B.t(), out, state=quant_state)
if bias is not None:
out += bias
return out
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
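Functionally, mm_dequant rescales the int32 accumulator back to fp16 using the row-wise and column-wise absmax statistics from the 8-bit quantization. A rough reference implementation; this is an approximation that ignores the fused new_row_stats/new_col_stats outputs of the CUDA kernel and assumes symmetric 127/absmax scaling on both inputs:

import torch

def mm_dequant_reference(A_int32, row_stats, col_stats, bias=None):
    # undo the symmetric int8 scaling (x * 127 / absmax) that was applied to both inputs
    out = A_int32.float() * row_stats.view(-1, 1) * col_stats.view(1, -1) / (127.0 * 127.0)
    if bias is not None:
        out += bias
    return out.half()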
|
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
61a4a20da91b7f780a98d3ff8235f1455835ecf2
|
use QuantState class for quant_state
|
<s>4', 'fp4'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
<0> for dim in [128, 256, 512, 1024]:
<1> #for dim in [4*1024]:
<2> #for dim in [1*16]:
<3> errs1 = []
<4> errs2 = []
<5> errs3 = []
<6> relerrs1 = []
<7> relerrs2 = []
<8> relerrs3 = []
<9> max_errs1 = []
<10> max_errs2 = []
<11> max_errs3 = []
<12>
<13>
<14> for i in range(100):
<15> if kind == 'fc1':
<16> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<17> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<18> elif kind == 'fc2':
<19> A = torch.randn(1, 4*dim, dtype=dtype, device='cuda')
<20> B = torch.randn(dim, 4*dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<21> elif kind == 'attn':
<22> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<23> B = torch.randn(dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<24> elif kind == 'attn_packed':
<25> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<26> B = torch.randn(dim*3, dim, dtype=dtype, device</s>
|
===========below chunk 0===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 1
qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=double_quant)
C3 = torch.matmul(A, B.t())
C2 = F.gemv_4bit(A, qB.t(), state=state)
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
err1 = (C1-C2).abs().float()
err2 = (C3-C2).abs().float()
err3 = (C3-C1).abs().float()
mag1 = torch.abs(C1).float()+1e-5
mag2 = torch.abs(C3).float()+1e-5
mag3 = torch.abs(C3).float()+1e-5
relerr1 = err1/mag1
relerr2 = err2/mag2
relerr3 = err3/mag3
max_err1 = err1.max()
max_err2 = err2.max()
max_err3 = err3.max()
errs1.append(err1.mean().item())
errs2.append(err2.mean().item())
errs3.append(err3.mean().item())
relerrs1.append(relerr1.mean().item())
relerrs2.append(relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_</s>
===========below chunk 1===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 2
<s>relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_errs1.append(max_err1.item())
max_errs2.append(max_err2.item())
max_errs3.append(max_err3.item())
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
err1 = sum(errs1)/len(errs1)/math.sqrt(dim)
err2 = sum(errs2)/len(errs2)/math.sqrt(dim)
err3 = sum(errs3)/len(errs3)/math.sqrt(dim)
relerr1 = sum(relerrs1)/len(relerrs1)/math.sqrt(dim)
relerr2 = sum(relerrs2)/len(relerrs2)/math.sqrt(dim)
relerr3 = sum(relerrs3)/len(relerrs3)/math.sqrt(dim)
maxerr1 = sum(max_errs1)/len(max_errs1)/math.sqrt(dim)
maxerr2 = sum(max_errs2)/len(max_errs2)/math.sqrt(dim)
maxerr3 = sum(max</s>
===========below chunk 2===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 3
<s>s3)/len(max_errs3)/math.sqrt(dim)
absratio = err2/err3
relratio = relerr2/relerr3
maxratio = relerr2/relerr3
# for debugging if the tests fails
#
#print('='*80)
#print(f'For matmul: {A.shape}, {B.shape}, {kind}, {dtype}, {storage_type}, double_quant={double_quant}:')
print(C1.flatten()[-20:])
print(C2.flatten()[-20:])
print(f'inference vs training abs: {err1}')
print(f'inference vs training rel: {relerr1}')
print(f'inference vs training max: {maxerr1}')
#print(f'inference vs training vs torch err ratio abs: {absratio}')
#print(f'inference vs training vs torch err ratio rel: {relratio}')
#print(f'inference vs training vs torch err ratio max: {maxratio}')
if dtype == torch.float16:
if dim <= 512:
assert err1 < 7e-5
assert relerr1 < 0.0008
else:
assert err1 < 6e-5
assert relerr1 < 2e-4
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.005 and rel</s>
===========below chunk 3===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 4
<s> 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.float32:
if dim <= 512:
assert err1 < 5e-8
assert relerr1 < 1e-6
assert maxerr1 < 1e-7
else:
assert err1 < 5e-8
assert relerr1 < 8e-6
assert maxerr1 < 1e-7
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.005 and relratio > 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.bfloat16:
if dim <= 512:
assert err1 < 6e-4
assert relerr1 < 0.007
assert maxerr1 < 0.015
else:
assert err1 < 2e-4
assert relerr1 < 0.002
assert maxerr1 < 0.0012
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.04 and relratio > 0.96
assert maxratio < 1.02 and maxratio > 0.98
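The thresholds above gate error statistics accumulated over 100 random matrices; per iteration the test's arithmetic boils down to the following sketch:

import torch

def error_metrics(C_ref: torch.Tensor, C_test: torch.Tensor):
    err = (C_ref - C_test).abs().float()
    relerr = err / (C_ref.abs().float() + 1e-5)   # epsilon guards against division by zero
    return err.mean().item(), relerr.mean().item(), err.max().item()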
|
|
bitsandbytes.nn.modules/Params4bit.from_prequantized
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
48b3e7703e676893789c56476a16db8a4571e01e
|
some renaming
|
<4>:<add> self.quant_state = QuantState.from_dict(quant_state_dict=quantized_stats, device=device)
<del> self.quant_state = QuantState.from_kwargs(kwargs=quantized_stats, device=device)
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, quantized_stats, data=None, requires_grad=False, device='cuda', **kwargs):
<0> if data is None:
<1> data = quantized_stats.pop('weight')
<2> self = torch.Tensor._make_subclass(cls, data.to(device))
<3> self.requires_grad = requires_grad
<4> self.quant_state = QuantState.from_kwargs(kwargs=quantized_stats, device=device)
<5> self.blocksize = self.quant_state.blocksize
<6> self.compress_statistics = self.quant_state.nested
<7> self.quant_type = self.quant_state.quant_type
<8> return self
<9>
|
===========unchanged ref 0===========
at: bitsandbytes.functional
QuantState(absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None)
at: bitsandbytes.functional.QuantState
from_dict(quant_state_dict: dict[str, torch.Tensor], device: torch.device) -> 'QuantState'
|
bitsandbytes.nn.modules/Linear4bit._save_to_state_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
1d541b50ab97578d16117dd6f7b3477236c1c886
|
[WiP] rework of Q_state save format
|
<1>:<add> besides weight and bias,
<add> fill state_dict with components of quant_state
<del> fill state_dict with components of nf4
<2>:<del> TODO: test with other 4-bit Q-types
<4>:<add> if getattr(self.weight, "quant_state", None) is not None:
<add> quant_state_dict = self.weight.quant_state.as_dict()
<add> tensor_keys = [k for k, v in quant_state_dict.items() if isinstance(v, torch.Tensor)]
<add> for k in tensor_keys:
<add> destination[prefix + "weight." + k] = quant_state_dict.pop(k) if keep_vars else quant_state_dict.pop(k).detach()
<add> destination[prefix + "weight." + "quant_state_dict"] = quant_state_dict
<add> destination[prefix + "weight." + "quantization_method"] = "bitsandbytes." + quant_state_dict["quant_type"]
<del> self._update_buffers() # link the quant_state items with _buffers
|
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def _save_to_state_dict(self, destination, prefix, keep_vars):
<0> """
<1> fill state_dict with components of nf4
<2> TODO: test with other 4-bit Q-types
<3> """
<4> self._update_buffers() # link the quant_state items with _buffers
<5> super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias
<6>
|
===========unchanged ref 0===========
at: bitsandbytes.autograd._functions
matmul_4bit(A: tensor, B: tensor, quant_state: F.QuantState, out: tensor=None, bias=None)
at: bitsandbytes.nn.modules.Linear4bit.__init__
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
self.compute_dtype = compute_dtype
at: bitsandbytes.nn.modules.Linear4bit.set_compute_type
self.compute_dtype = x.dtype
at: bitsandbytes.nn.modules.Params4bit.cuda
self.quant_state = quant_state
at: torch.nn.modules.linear.Linear.__init__
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
- def _update_buffers(self):
-
- def string_to_tensor(s):
- """stores string as ints for serialization. assumes codes fit int16"""
- return torch.tensor([ord(x) for x in s], dtype=torch.int16)
-
- if getattr(self.weight, 'quant_state', None) is not None:
- weight_quant_state = self.weight.quant_state
- self.register_buffer('absmax', weight_quant_state.absmax)
- self.register_buffer('shape', torch.tensor(weight_quant_state.shape))
- self.register_buffer('dtype', string_to_tensor(str(weight_quant_state.dtype).strip('torch')))
- self.register_buffer('blocksize', torch.tensor(weight_quant_state.blocksize))
- self.register_buffer('quant_type', string_to_tensor(weight_quant_state.quant_type))
- self.register_buffer('code', weight_quant_state.code)
-
- if weight_quant_state.nested:
- self.register_buffer('nested_offset', weight_quant_state.offset)
- self.register_buffer('nested_absmax', weight_quant_state.state2.absmax)
- self.register_buffer('nested_code', weight_quant_state.state2.code)
- self.register_buffer('nested_blocksize', torch.tensor(weight_quant_state.state2.blocksize))
- self.register_buffer('nested_dtype', string_to_tensor(str(weight_quant_state.state2.dtype).strip('torch')))
-
|
bitsandbytes.functional/QuantState.from_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
1d541b50ab97578d16117dd6f7b3477236c1c886
|
[WiP] rework of Q_state save format
|
<4>:<del> tensor2str = lambda xx: ''.join([chr(x) for x in xx]).strip('.')
<6>:<add> quant_state_dict = {k.split('.')[-1]:v for k, v in quant_state_dict.items()}
<del> quant_state_dict = {k.split('.')[-1] :v for k, v in quant_state_dict.items()}
<7>:<add> if 'quant_state_dict' in quant_state_dict:
<add> quant_state_dict|= quant_state_dict.pop('quant_state_dict')
<add>
<del>
<9>:<add> offset = torch.tensor(float(quant_state_dict['nested_offset'])).to(device)
<del> offset = quant_state_dict['nested_offset']
<13>:<add> blocksize=int(quant_state_dict['nested_blocksize']),
<del> blocksize=quant_state_dict['nested_blocksize'].item(),
<14>:<add> dtype=getattr(torch, quant_state_dict['nested_dtype']),
<del> dtype=getattr(torch, tensor2str(quant_state_dict['nested_dtype'])),
<20>:<add> absmax=quant_state_dict['absmax'].to(device),
<del> absmax=quant_state_dict['absmax'].to(device),
<21>:<add> shape=torch.Size(map(int, quant_state_dict['shape'].split('.'))),
<del> shape=torch.Size(quant_state_dict['shape']),
<22>:<add> dtype=getattr(torch, quant_state_dict['dtype']),
<del> dtype=getattr(torch, tensor2str(quant_state_dict['dtype'])),
<23>:<add> blocksize=int(quant_state_dict['block
|
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, quant_state_dict: dict[str, torch.Tensor], device: torch.device) -> 'QuantState':
<0> """
<1> unpacks dict of tensors into QuantState
<2> where necessary, convert into strings, torch.dtype, ints, etc.
<3> """
<4> tensor2str = lambda xx: ''.join([chr(x) for x in xx]).strip('.')
<5>
<6> quant_state_dict = {k.split('.')[-1] :v for k, v in quant_state_dict.items()}
<7>
<8> if 'nested_absmax' in quant_state_dict:
<9> offset = quant_state_dict['nested_offset']
<10> state2 = cls(
<11> absmax=quant_state_dict['nested_absmax'].to(device),
<12> code=quant_state_dict['nested_code'].to(device),
<13> blocksize=quant_state_dict['nested_blocksize'].item(),
<14> dtype=getattr(torch, tensor2str(quant_state_dict['nested_dtype'])),
<15> )
<16> else:
<17> offset, state2 = None, None
<18>
<19> quant_state = cls(
<20> absmax=quant_state_dict['absmax'].to(device),
<21> shape=torch.Size(quant_state_dict['shape']),
<22> dtype=getattr(torch, tensor2str(quant_state_dict['dtype'])),
<23> blocksize=quant_state_dict['blocksize'].item(),
<24> offset=offset,
<25> state2=state2,
<26> quant_type=tensor2str(quant_state_dict['quant_type']),
<27> code=quant_state_dict['code'].to(device),
<28> )
<29> return quant_state
<30>
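The first step of from_dict strips the module-path prefix from every serialized key so that lookups like quant_state_dict['absmax'] work regardless of where the layer sits in the model. A small illustration with hypothetical key names:

serialized = {
    'transformer.h.0.attn.weight.absmax': '<tensor>',
    'transformer.h.0.attn.weight.quant_type': 'nf4',
}
stripped = {k.split('.')[-1]: v for k, v in serialized.items()}
assert stripped == {'absmax': '<tensor>', 'quant_type': 'nf4'}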
|
===========unchanged ref 0===========
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
Size()
at: torch._C._VariableFunctions
tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
at: torch._tensor.Tensor
detach = _C._add_docstr(
_C._TensorBase.detach,
r"""
Returns a new Tensor, detached from the current graph.
The result will never require gradient.
This method also affects forward mode AD gradients and the result will never
have forward mode AD gradients.
.. note::
Returned Tensor shares the same storage with the original one.
In-place modifications on either of them will be seen, and may trigger
errors in correctness checks.
IMPORTANT NOTE: Previously, in-place size / stride / storage changes
(such as `resize_` / `resize_as_` / `set_` / `transpose_`) to the returned tensor
also update the original tensor. Now, these in-place changes will not update the
original tensor anymore, and will instead trigger an error.
For sparse tensors:
In-place indices / values changes (such as `zero_` / `copy_` / `add_`) to the
returned tensor will not update the original tensor anymore, and will instead
trigger an error.
""",
)
detach_ = _C._add_docstr(
_C._TensorBase.detach_,
r"""
Detaches the Tensor from the graph that created it, making it a leaf.
Views cannot be detached in-place.
This method also affects forward mode AD gradients and the result will never
have forward mode AD gradients.
""",
)
split(split_size, dim=0)
__rtruediv__ = __rdiv__
__itruediv__ = _C._TensorBase.__idiv__
===========unchanged ref 1===========
__pow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
_C._TensorBase.pow
)
__ipow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
_C._TensorBase.pow_
)
__pos__ = _C._TensorBase.positive
__neg__ = _C._TensorBase.neg
__abs__ = _C._TensorBase.abs
__array_priority__ = 1000 # prefer Tensor ops over numpy ones
__torch_dispatch__ = _C._disabled_torch_dispatch_impl
__module__ = "torch"
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def _save_to_state_dict(self, destination, prefix, keep_vars):
"""
+ besides weight and bias,
+ fill state_dict with components of quant_state
- fill state_dict with components of nf4
- TODO: test with other 4-bit Q-types
"""
+ if getattr(self.weight, "quant_state", None) is not None:
+ quant_state_dict = self.weight.quant_state.as_dict()
+ tensor_keys = [k for k, v in quant_state_dict.items() if isinstance(v, torch.Tensor)]
+ for k in tensor_keys:
+ destination[prefix + "weight." + k] = quant_state_dict.pop(k) if keep_vars else quant_state_dict.pop(k).detach()
+ destination[prefix + "weight." + "quant_state_dict"] = quant_state_dict
+ destination[prefix + "weight." + "quantization_method"] = "bitsandbytes." + quant_state_dict["quant_type"]
- self._update_buffers() # link the quant_state items with _buffers
super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
- def _update_buffers(self):
-
- def string_to_tensor(s):
- """stores string as ints for serialization. assumes codes fit int16"""
- return torch.tensor([ord(x) for x in s], dtype=torch.int16)
-
- if getattr(self.weight, 'quant_state', None) is not None:
- weight_quant_state = self.weight.quant_state
- self.register_buffer('absmax', weight_quant_state.absmax)
- self.register_buffer('shape', torch.tensor(weight_quant_state.shape))
- self.register_buffer('dtype', string_to_tensor(str(weight_quant_state.dtype).strip('torch')))
- self.register_buffer('blocksize', torch.tensor(weight_quant_state.blocksize))
- self.register_buffer('quant_type', string_to_tensor(weight_quant_state.quant_type))
- self.register_buffer('code', weight_quant_state.code)
-
- if weight_quant_state.nested:
- self.register_buffer('nested_offset', weight_quant_state.offset)
- self.register_buffer('nested_absmax', weight_quant_state.state2.absmax)
- self.register_buffer('nested_code', weight_quant_state.state2.code)
- self.register_buffer('nested_blocksize', torch.tensor(weight_quant_state.state2.blocksize))
- self.register_buffer('nested_dtype', string_to_tensor(str(weight_quant_state.state2.dtype).strip('torch')))
-
|
bitsandbytes.nn.modules/Linear4bit._save_to_state_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6a934d4f40d9a3a385c890379abaa7198faf6492
|
reorder state_dict
|
<4>:<add> super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias
<add>
<11>:<del> super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias
|
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def _save_to_state_dict(self, destination, prefix, keep_vars):
<0> """
<1> besides weight and bias,
<2> fill state_dict with components of quant_state
<3> """
<4> if getattr(self.weight, "quant_state", None) is not None:
<5> quant_state_dict = self.weight.quant_state.as_dict()
<6> tensor_keys = [k for k, v in quant_state_dict.items() if isinstance(v, torch.Tensor)]
<7> for k in tensor_keys:
<8> destination[prefix + "weight." + k] = quant_state_dict.pop(k) if keep_vars else quant_state_dict.pop(k).detach()
<9> destination[prefix + "weight." + "quant_state_dict"] = quant_state_dict
<10> destination[prefix + "weight." + "quantization_method"] = "bitsandbytes." + quant_state_dict["quant_type"]
<11> super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias
<12>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.modules.Linear4bit.__init__
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
at: bitsandbytes.nn.modules.Params4bit.cuda
self.quant_state = quant_state
at: torch.nn.modules.module.Module
dump_patches: bool = False
_version: int = 1
training: bool
_parameters: Dict[str, Optional[Parameter]]
_buffers: Dict[str, Optional[Tensor]]
_non_persistent_buffers_set: Set[str]
_backward_pre_hooks: Dict[int, Callable]
_backward_hooks: Dict[int, Callable]
_is_full_backward_hook: Optional[bool]
_forward_hooks: Dict[int, Callable]
_forward_hooks_with_kwargs: Dict[int, bool]
_forward_hooks_always_called: Dict[int, bool]
_forward_pre_hooks: Dict[int, Callable]
_forward_pre_hooks_with_kwargs: Dict[int, bool]
_state_dict_hooks: Dict[int, Callable]
_load_state_dict_pre_hooks: Dict[int, Callable]
_state_dict_pre_hooks: Dict[int, Callable]
_load_state_dict_post_hooks: Dict[int, Callable]
_modules: Dict[str, Optional['Module']]
call_super_init: bool = False
_compiled_call_impl : Optional[Callable] = None
forward: Callable[..., Any] = _forward_unimplemented
__call__ : Callable[..., Any] = _wrapped_call_impl
_save_to_state_dict(self, destination, prefix, keep_vars)
_save_to_state_dict(destination, prefix, keep_vars)
===========unchanged ref 1===========
T_destination = TypeVar('T_destination', bound=Dict[str, Any])
|
bitsandbytes.nn.modules/Params4bit.from_prequantized
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6cf0f05d632f963b5ab71f81d04873a555f93947
|
rework of non-tensor qs items storage
|
<4>:<add> self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
<del> self.quant_state = QuantState.from_dict(quant_state_dict=quantized_stats, device=device)
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, quantized_stats, data=None, requires_grad=False, device='cuda', **kwargs):
<0> if data is None:
<1> data = quantized_stats.pop('weight')
<2> self = torch.Tensor._make_subclass(cls, data.to(device))
<3> self.requires_grad = requires_grad
<4> self.quant_state = QuantState.from_dict(quant_state_dict=quantized_stats, device=device)
<5> self.blocksize = self.quant_state.blocksize
<6> self.compress_statistics = self.quant_state.nested
<7> self.quant_type = self.quant_state.quant_type
<8> return self
<9>
|
===========unchanged ref 0===========
at: bitsandbytes.functional
QuantState(absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None)
at: bitsandbytes.functional.QuantState
from_dict(qs_dict: dict[str, Any], device: torch.device) -> 'QuantState'
===========changed ref 0===========
# module: bitsandbytes.utils
+ def unpack_tensor_to_dict(tensor_data):
+ """
+ Unpack a torch tensor into a Python dictionary.
+
+ Parameters:
+ - tensor_data: The torch tensor containing the packed data.
+
+ Returns:
+ A Python dictionary containing the unpacked data.
+ """
+ json_bytes = bytes(tensor_data.numpy())
+ json_str = json_bytes.decode('utf-8')
+ unpacked_dict = json.loads(json_str)
+
+ return unpacked_dict
+
===========changed ref 1===========
# module: bitsandbytes.utils
+ def pack_dict_to_tensor(source_dict):
+ """
+ Pack a dictionary into a torch tensor for storing quant_state items in state_dict.
+
+ Parameters:
+ - source_dict: The dictionary to be packed.
+
+ Returns:
+ A torch tensor containing the packed data.
+ """
+ json_str = json.dumps(source_dict)
+ json_bytes = json_str.encode('utf-8')
+ tensor_data = torch.tensor(list(json_bytes), dtype=torch.uint8)
+
+ return tensor_data
+
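The two helpers above give a lossless way to carry non-tensor metadata through a tensors-only state_dict. A quick round-trip check, assuming the functions are importable from bitsandbytes.utils as defined:

from bitsandbytes.utils import pack_dict_to_tensor, unpack_tensor_to_dict

meta = {'quant_type': 'nf4', 'blocksize': 64, 'shape': [4096, 4096]}
packed = pack_dict_to_tensor(meta)             # uint8 tensor holding UTF-8 encoded JSON
assert unpack_tensor_to_dict(packed) == meta   # exact round trip for JSON-serializable values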
|
bitsandbytes.nn.modules/Linear4bit._save_to_state_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6cf0f05d632f963b5ab71f81d04873a555f93947
|
rework of non-tensor qs items storage
|
<1>:<add> save weight and bias,
<del> besides weight and bias,
<2>:<add> then fill state_dict with components of quant_state
<del> fill state_dict with components of quant_state
<7>:<del> quant_state_dict = self.weight.quant_state.as_dict()
<8>:<del> tensor_keys = [k for k, v in quant_state_dict.items() if isinstance(v, torch.Tensor)]
<9>:<del> for k in tensor_keys:
<10>:<add> for k, v in self.weight.quant_state.as_dict(packed=True).items():
<add> destination[prefix + "weight." + k] = v if keep_vars else v.detach()
<del> destination[prefix + "weight." + k] = quant_state_dict.pop(k) if keep_vars else quant_state_dict.pop(k).detach()
<11>:<del> destination[prefix + "weight." + "quant_state_dict"] = quant_state_dict
<12>:<del> destination[prefix + "weight." + "quantization_method"] = "bitsandbytes." + quant_state_dict["quant_type"]
|
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def _save_to_state_dict(self, destination, prefix, keep_vars):
<0> """
<1> besides weight and bias,
<2> fill state_dict with components of quant_state
<3> """
<4> super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias
<5>
<6> if getattr(self.weight, "quant_state", None) is not None:
<7> quant_state_dict = self.weight.quant_state.as_dict()
<8> tensor_keys = [k for k, v in quant_state_dict.items() if isinstance(v, torch.Tensor)]
<9> for k in tensor_keys:
<10> destination[prefix + "weight." + k] = quant_state_dict.pop(k) if keep_vars else quant_state_dict.pop(k).detach()
<11> destination[prefix + "weight." + "quant_state_dict"] = quant_state_dict
<12> destination[prefix + "weight." + "quantization_method"] = "bitsandbytes." + quant_state_dict["quant_type"]
<13>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.modules.Linear4bit.__init__
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
at: bitsandbytes.nn.modules.Params4bit.cuda
self.quant_state = quant_state
at: torch.nn.modules.linear.Linear
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
forward(self, input: Tensor) -> Tensor
at: torch.nn.modules.linear.Linear.__init__
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
at: torch.nn.modules.module.Module
dump_patches: bool = False
_version: int = 1
training: bool
_parameters: Dict[str, Optional[Parameter]]
_buffers: Dict[str, Optional[Tensor]]
_non_persistent_buffers_set: Set[str]
_backward_pre_hooks: Dict[int, Callable]
_backward_hooks: Dict[int, Callable]
_is_full_backward_hook: Optional[bool]
_forward_hooks: Dict[int, Callable]
_forward_hooks_with_kwargs: Dict[int, bool]
_forward_hooks_always_called: Dict[int, bool]
_forward_pre_hooks: Dict[int, Callable]
_forward_pre_hooks_with_kwargs: Dict[int, bool]
_state_dict_hooks: Dict[int, Callable]
_load_state_dict_pre_hooks: Dict[int, Callable]
_state_dict_pre_hooks: Dict[int, Callable]
_load_state_dict_post_hooks: Dict[int, Callable]
_modules: Dict[str, Optional['Module']]
call_super_init: bool = False
===========unchanged ref 1===========
_compiled_call_impl : Optional[Callable] = None
forward: Callable[..., Any] = _forward_unimplemented
__call__ : Callable[..., Any] = _wrapped_call_impl
_save_to_state_dict(self, destination, prefix, keep_vars)
_save_to_state_dict(destination, prefix, keep_vars)
T_destination = TypeVar('T_destination', bound=Dict[str, Any])
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, quantized_stats, data=None, requires_grad=False, device='cuda', **kwargs):
if data is None:
data = quantized_stats.pop('weight')
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
+ self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
- self.quant_state = QuantState.from_dict(quant_state_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
return self
===========changed ref 1===========
# module: bitsandbytes.utils
+ def unpack_tensor_to_dict(tensor_data):
+ """
+ Unpack a torch tensor into a Python dictionary.
+
+ Parameters:
+ - tensor_data: The torch tensor containing the packed data.
+
+ Returns:
+ A Python dictionary containing the unpacked data.
+ """
+ json_bytes = bytes(tensor_data.numpy())
+ json_str = json_bytes.decode('utf-8')
+ unpacked_dict = json.loads(json_str)
+
+ return unpacked_dict
+
===========changed ref 2===========
# module: bitsandbytes.utils
+ def pack_dict_to_tensor(source_dict):
+ """
+ Pack a dictionary into a torch tensor for storing quant_state items in state_dict.
+
+ Parameters:
+ - source_dict: The dictionary to be packed.
+
+ Returns:
+ A torch tensor containing the packed data.
+ """
+ json_str = json.dumps(source_dict)
+ json_bytes = json_str.encode('utf-8')
+ tensor_data = torch.tensor(list(json_bytes), dtype=torch.uint8)
+
+ return tensor_data
+
|
bitsandbytes.functional/QuantState.from_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6cf0f05d632f963b5ab71f81d04873a555f93947
|
rework of non-tensor qs items storage
|
<3>:<add>
<add> quant_state_dict may contain an item with non-tensor components, under a key like
<add> `...weight.quant_state.bitsandbytes__[nf4/fp4]`
<add> it is detected via the key stored in qs_key, and then unpacked
<5>:<add> # unpacking tensor with non-tensor components
<add> qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
<add> if len(qs_key) == 1:
<add> qs_key = qs_key[0]
<add> assert 'bitsandbytes__nf4' in qs_key or 'bitsandbytes__fp4' in qs_key, \
<add> f"invalid qs_key value {qs_key}"
<add> qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
<del> quant_state_dict = {k.split('.')[-1]:v for k, v in quant_state_dict.items()}
<6>:<del> if 'quant_state_dict' in quant_state_dict:
<7>:<del> quant_state_dict|= quant_state_dict.pop('quant_state_dict')
<9>:<add> qs_dict = {k.split('.')[-1]:v for k, v in qs_dict.items()} # strip prefixes
|
# module: bitsandbytes.functional
class QuantState:
@classmethod
+ def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
- def from_dict(cls, quant_state_dict: dict[str, torch.Tensor], device: torch.device) -> 'QuantState':
<0> """
<1> unpacks dict of tensors into QuantState
<2> where necessary, convert into strings, torch.dtype, ints, etc.
<3> """
<4>
<5> quant_state_dict = {k.split('.')[-1]:v for k, v in quant_state_dict.items()}
<6> if 'quant_state_dict' in quant_state_dict:
<7> quant_state_dict|= quant_state_dict.pop('quant_state_dict')
<8>
<9> if 'nested_absmax' in quant_state_dict:
<10> offset = torch.tensor(float(quant_state_dict['nested_offset'])).to(device)
<11> state2 = cls(
<12> absmax=quant_state_dict['nested_absmax'].to(device),
<13> code=quant_state_dict['nested_code'].to(device),
<14> blocksize=int(quant_state_dict['nested_blocksize']),
<15> dtype=getattr(torch, quant_state_dict['nested_dtype']),
<16> )
<17> else:
<18> offset, state2 = None, None
<19>
<20> quant_state = cls(
<21> absmax=quant_state_dict['absmax'].to(device),
<22> shape=torch.Size(map(int, quant_state_dict['shape'].split('.'))),
<23> dtype=getattr(torch, quant_state_dict['dtype']),
<24> blocksize=int(quant_state_dict['blocksize']),
<25> offset=offset,
<26> state2=state2,
<27> quant_type=quant_state_dict['quant_type'],
<28> code=quant_state_dict['code'].to(device),
<29> )
<30> return quant_state
</s>
|
===========unchanged ref 0===========
at: bitsandbytes.utils
unpack_tensor_to_dict(tensor_data)
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
at: torch._C._VariableFunctions
tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
===========changed ref 0===========
# module: bitsandbytes.utils
+ def unpack_tensor_to_dict(tensor_data):
+ """
+ Unpack a torch tensor into a Python dictionary.
+
+ Parameters:
+ - tensor_data: The torch tensor containing the packed data.
+
+ Returns:
+ A Python dictionary containing the unpacked data.
+ """
+ json_bytes = bytes(tensor_data.numpy())
+ json_str = json_bytes.decode('utf-8')
+ unpacked_dict = json.loads(json_str)
+
+ return unpacked_dict
+
===========changed ref 1===========
# module: bitsandbytes.utils
+ def pack_dict_to_tensor(source_dict):
+ """
+ Pack a dictionary into a torch tensor for storing quant_state items in state_dict.
+
+ Parameters:
+ - source_dict: The dictionary to be packed.
+
+ Returns:
+ A torch tensor containing the packed data.
+ """
+ json_str = json.dumps(source_dict)
+ json_bytes = json_str.encode('utf-8')
+ tensor_data = torch.tensor(list(json_bytes), dtype=torch.uint8)
+
+ return tensor_data
+
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, quantized_stats, data=None, requires_grad=False, device='cuda', **kwargs):
if data is None:
data = quantized_stats.pop('weight')
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
+ self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
- self.quant_state = QuantState.from_dict(quant_state_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
return self
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def _save_to_state_dict(self, destination, prefix, keep_vars):
"""
+ save weight and bias,
- besides weight and bias,
+ then fill state_dict with components of quant_state
- fill state_dict with components of quant_state
"""
super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias
if getattr(self.weight, "quant_state", None) is not None:
- quant_state_dict = self.weight.quant_state.as_dict()
- tensor_keys = [k for k, v in quant_state_dict.items() if isinstance(v, torch.Tensor)]
- for k in tensor_keys:
+ for k, v in self.weight.quant_state.as_dict(packed=True).items():
+ destination[prefix + "weight." + k] = v if keep_vars else v.detach()
- destination[prefix + "weight." + k] = quant_state_dict.pop(k) if keep_vars else quant_state_dict.pop(k).detach()
- destination[prefix + "weight." + "quant_state_dict"] = quant_state_dict
- destination[prefix + "weight." + "quantization_method"] = "bitsandbytes." + quant_state_dict["quant_type"]
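After this change the save path writes each tensor component of the quant state under its own key and everything non-tensor inside a single packed tensor. A self-contained sketch of what the loop produces (the 'model.fc1.' prefix is hypothetical):

import torch
import bitsandbytes.functional as F

W = torch.randn(256, 256, dtype=torch.float16, device='cuda')
_, qs = F.quantize_4bit(W, quant_type='nf4')

destination = {}
for k, v in qs.as_dict(packed=True).items():     # every value here is a torch.Tensor
    destination['model.fc1.weight.' + k] = v.detach()

On load, Params4bit.from_prequantized hands these entries back to QuantState.from_dict, which detects the packed tensor by its quant_state.bitsandbytes__nf4/fp4 key and unpacks it.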
|
bitsandbytes.functional/QuantState.as_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6cf0f05d632f963b5ab71f81d04873a555f93947
|
rework of non-tensor qs items storage
|
<0>:<add> """
<add> returns dict of tensors and strings to use in serialization via _save_to_state_dict()
<del> """dict of tensors and strings to use in serialization via _save_to_state_dict()"""
<1>:<add> param: packed -- returns dict[str, torch.Tensor] for state_dict
<add> """
<4>:<add> 'shape': tuple(self.shape),
<del> 'shape': ','.join(map(str, self.shape)),
<5>:<add> 'dtype': str(self.dtype).strip('torch.'),
<del> 'dtype': str(self.dtype).strip('torch'),
<6>:<add> 'blocksize': self.blocksize,
<del> 'blocksize': str(self.blocksize),
<13>:<add> 'nested_offset': self.offset.item(),
<del> 'nested_offset': f"{self.offset.item()}",
<14>:<add> 'nested_blocksize': self.state2.blocksize,
<del> 'nested_blocksize': str(self.state2.blocksize),
<15>:<add> 'nested_dtype': str(self.state2.dtype).strip('torch.'),
<del> 'nested_dtype': str(self.state2.dtype).strip('torch'),
<17>:<add> if not packed:
<add> return qs_dict
<del> return qs_dict
|
# module: bitsandbytes.functional
class QuantState:
+ def as_dict(self, packed=False):
- def as_dict(self):
<0> """dict of tensors and strings to use in serialization via _save_to_state_dict()"""
<1> qs_dict = {
<2> 'absmax': self.absmax,
<3> 'code': self.code,
<4> 'shape': ','.join(map(str, self.shape)),
<5> 'dtype': str(self.dtype).strip('torch'),
<6> 'blocksize': str(self.blocksize),
<7> 'quant_type': self.quant_type,
<8> }
<9> if self.nested:
<10> qs_dict.update({
<11> 'nested_absmax': self.state2.absmax,
<12> 'nested_code': self.state2.code,
<13> 'nested_offset': f"{self.offset.item()}",
<14> 'nested_blocksize': str(self.state2.blocksize),
<15> 'nested_dtype': str(self.state2.dtype).strip('torch'),
<16> })
<17> return qs_dict
<18>
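Concretely, for a non-nested state the unpacked (packed=False) dictionary after this change would look roughly like the sketch below, with tensors kept as tensors and plain Python types for everything else (all values are placeholders):

import torch

example_qs_dict = {
    'absmax': torch.rand(8),    # torch.Tensor, per-block absolute maxima
    'code': torch.rand(16),     # torch.Tensor, quantization map
    'shape': (4096, 4096),      # tuple(self.shape)
    'dtype': 'float16',         # str(self.dtype).strip('torch.')
    'blocksize': 64,            # kept as a plain int after this change
    'quant_type': 'nf4',
}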
|
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState.__init__
self.absmax = absmax
self.code = code
at: bitsandbytes.functional.QuantState.from_dict
qs_dict = {k.split('.')[-1]:v for k, v in qs_dict.items()} # strip prefixes
qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
offset, state2 = None, None
state2 = cls(
absmax=qs_dict['nested_absmax'].to(device),
code=qs_dict['nested_code'].to(device),
blocksize=qs_dict['nested_blocksize'],
dtype=getattr(torch, qs_dict['nested_dtype']),
)
offset, state2 = None, None
quant_state = cls(
absmax=qs_dict['absmax'].to(device),
shape=torch.Size(qs_dict['shape']),
dtype=getattr(torch, qs_dict['dtype']),
blocksize=qs_dict['blocksize'],
offset=offset,
state2=state2,
quant_type=qs_dict['quant_type'],
code=qs_dict['code'].to(device),
)
at: bitsandbytes.functional.QuantState.to
self.absmax = self.absmax.to(device)
at: torch._C
Size()
===========changed ref 0===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
+ def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
- def from_dict(cls, quant_state_dict: dict[str, torch.Tensor], device: torch.device) -> 'QuantState':
"""
unpacks dict of tensors into QuantState
where necessary, convert into strings, torch.dtype, ints, etc.
+
+ quant_state_dict may contain an item with non-tensor components, under a key like
+ `...weight.quant_state.bitsandbytes__[nf4/fp4]`
+ it is detected via the key stored in qs_key, and then unpacked
"""
+ # unpacking tensor with non-tensor components
+ qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
+ if len(qs_key) == 1:
+ qs_key = qs_key[0]
+ assert 'bitsandbytes__nf4' in qs_key or 'bitsandbytes__fp4' in qs_key, \
+ f"invalid qs_key value {qs_key}"
+ qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
- quant_state_dict = {k.split('.')[-1]:v for k, v in quant_state_dict.items()}
- if 'quant_state_dict' in quant_state_dict:
- quant_state_dict|= quant_state_dict.pop('quant_state_dict')
+ qs_dict = {k.split('.')[-1]:v for k, v in qs_dict.items()} # strip prefixes
+
+ if 'nested_absmax' in qs_dict:
- if 'nested_absmax' in quant_state_dict:
+ offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
- offset = torch.tensor(float(quant</s>
===========changed ref 1===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
+ def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
- def from_dict(cls, quant_state_dict: dict[str, torch.Tensor], device: torch.device) -> 'QuantState':
# offset: 1
<s>.tensor(float(qs_dict['nested_offset'])).to(device)
- offset = torch.tensor(float(quant_state_dict['nested_offset'])).to(device)
state2 = cls(
+ absmax=qs_dict['nested_absmax'].to(device),
- absmax=quant_state_dict['nested_absmax'].to(device),
+ code=qs_dict['nested_code'].to(device),
- code=quant_state_dict['nested_code'].to(device),
+ blocksize=qs_dict['nested_blocksize'],
- blocksize=int(quant_state_dict['nested_blocksize']),
+ dtype=getattr(torch, qs_dict['nested_dtype']),
- dtype=getattr(torch, quant_state_dict['nested_dtype']),
)
else:
offset, state2 = None, None
quant_state = cls(
+ absmax=qs_dict['absmax'].to(device),
- absmax=quant_state_dict['absmax'].to(device),
+ shape=torch.Size(qs_dict['shape']),
- shape=torch.Size(map(int, quant_state_dict['shape'].split('.'))),
+ dtype=getattr(torch, qs_dict['dtype']),
- dtype=getattr(torch, quant_state_dict['dtype']),
+ blocksize=qs_dict['blocksize'],
- blocksize=int(quant_state_dict['blocksize']),
offset=offset,
state2=</s>
===========changed ref 2===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
+ def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
- def from_dict(cls, quant_state_dict: dict[str, torch.Tensor], device: torch.device) -> 'QuantState':
# offset: 2
<s>,
+ quant_type=qs_dict['quant_type'],
- quant_type=quant_state_dict['quant_type'],
+ code=qs_dict['code'].to(device),
- code=quant_state_dict['code'].to(device),
)
return quant_state
===========changed ref 3===========
# module: bitsandbytes.utils
+ def unpack_tensor_to_dict(tensor_data):
+ """
+ Unpack a torch tensor into a Python dictionary.
+
+ Parameters:
+ - tensor_data: The torch tensor containing the packed data.
+
+ Returns:
+ A Python dictionary containing the unpacked data.
+ """
+ json_bytes = bytes(tensor_data.numpy())
+ json_str = json_bytes.decode('utf-8')
+ unpacked_dict = json.loads(json_str)
+
+ return unpacked_dict
+
===========changed ref 4===========
# module: bitsandbytes.utils
+ def pack_dict_to_tensor(source_dict):
+ """
+ Pack a dictionary into a torch tensor for storing quant_state items in state_dict.
+
+ Parameters:
+ - source_dict: The dictionary to be packed.
+
+ Returns:
+ A torch tensor containing the packed data.
+ """
+ json_str = json.dumps(source_dict)
+ json_bytes = json_str.encode('utf-8')
+ tensor_data = torch.tensor(list(json_bytes), dtype=torch.uint8)
+
+ return tensor_data
+
|
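The two helpers shown in the changed refs above serialize the non-tensor parts of a quant state as UTF-8 JSON stored inside a uint8 tensor, so they can travel through a regular state_dict. A minimal round-trip sketch, assuming the bitsandbytes.utils module from this commit; the example dict is illustrative:

import torch
from bitsandbytes.utils import pack_dict_to_tensor, unpack_tensor_to_dict

meta = {"quant_type": "nf4", "blocksize": 64, "dtype": "float16"}  # illustrative values
packed = pack_dict_to_tensor(meta)        # uint8 tensor holding the JSON bytes
assert packed.dtype == torch.uint8
restored = unpack_tensor_to_dict(packed)  # bytes -> JSON -> dict
assert restored == meta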
bitsandbytes.nn.modules/Params4bit.from_prequantized
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
7a117e447e1545b0cd8717b85ee4bff502b9f60e
|
cleanup1
|
<1>:<add> weight_key = [k for k in quantized_stats if k.endswith(".weight")][0]
<add> data = quantized_stats.pop(weight_key)
<del> data = quantized_stats.pop('weight')
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, quantized_stats, data=None, requires_grad=False, device='cuda', **kwargs):
<0> if data is None:
<1> data = quantized_stats.pop('weight')
<2> self = torch.Tensor._make_subclass(cls, data.to(device))
<3> self.requires_grad = requires_grad
<4> self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
<5> self.blocksize = self.quant_state.blocksize
<6> self.compress_statistics = self.quant_state.nested
<7> self.quant_type = self.quant_state.quant_type
<8> return self
<9>
|
===========unchanged ref 0===========
at: bitsandbytes.functional
QuantState(absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None)
at: bitsandbytes.functional.QuantState
from_dict(qs_dict: dict[str, Any], device: torch.device) -> 'QuantState'
|
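For context, a hedged sketch of how from_prequantized is called at this stage of the API (mirroring the test shown further below); `sd` is assumed to be the state_dict of an already-quantized Linear4bit layer, and the file name is hypothetical:

import torch
import bitsandbytes as bnb

sd = torch.load("linear4bit_layer.pth")            # hypothetical Linear4bit checkpoint
weight_data = sd.pop("weight")                     # packed 4-bit weight (uint8)
weight = bnb.nn.Params4bit.from_prequantized(
    quantized_stats=sd, data=weight_data, device="cuda",
)
# the remaining entries of sd (absmax, code, quant_state.bitsandbytes__nf4, ...)
# are consumed by QuantState.from_dict inside from_prequantized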
bitsandbytes.functional/QuantState.from_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
7a117e447e1545b0cd8717b85ee4bff502b9f60e
|
cleanup1
|
<11>:<add> assert len(qs_key) == 1 or not qs_key and 'quant_type' in qs_dict, \
<add> f"`qs_dict` must contain packed quant_state items, or be unpacked. Found keys: {tuple(qs_dict.keys())}"
<23>:<add> blocksize=qs_dict['nested_blocksize'],
<24>:<del> blocksize=qs_dict['nested_blocksize'],
<31>:<add> quant_type=qs_dict['quant_type'],
<32>:<add> blocksize=qs_dict['blocksize'],
<add> code=qs_dict['code'].to(device),
<add> dtype=getattr(torch, qs_dict['dtype']),
|
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
<0> """
<1> unpacks dict of tensors into QuantState
<2> where necessary, convert into strings, torch.dtype, ints, etc.
<3>
<4> quant_state_dict may contain item with non-tensor components with key like
<5> `...weight.quant_state.bitsandbytes__[nf4/fp4]`
<6> it is detected with key strored in qs_key, and then unpacked
<7> """
<8>
<9> # unpacking tensor with non-tensor components
<10> qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
<11> if len(qs_key) == 1:
<12> qs_key = qs_key[0]
<13> assert 'bitsandbytes__nf4' in qs_key or 'bitsandbytes__fp4' in qs_key, \
<14> f"invalid qs_key value {qs_key}"
<15> qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
<16>
<17> qs_dict = {k.split('.')[-1]:v for k, v in qs_dict.items()} # strip prefixes
<18>
<19> if 'nested_absmax' in qs_dict:
<20> offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
<21> state2 = cls(
<22> absmax=qs_dict['nested_absmax'].to(device),
<23> code=qs_dict['nested_code'].to(device),
<24> blocksize=qs_dict['nested_blocksize'],
<25> dtype=getattr(torch, qs_dict['nested_dtype']),
<26> )
<27> else:
<28> offset, state2 = None, None
<29>
<30> quant_state = cls(
<31> absmax=qs_dict['absmax'].to(device),
<32> shape=torch.</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
# offset: 1
dtype=getattr(torch, qs_dict['dtype']),
blocksize=qs_dict['blocksize'],
offset=offset,
state2=state2,
quant_type=qs_dict['quant_type'],
code=qs_dict['code'].to(device),
)
return quant_state
===========unchanged ref 0===========
at: bitsandbytes.utils
unpack_tensor_to_dict(tensor_data)
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
Size()
at: torch._C._VariableFunctions
tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, quantized_stats, data=None, requires_grad=False, device='cuda', **kwargs):
if data is None:
+ weight_key = [k for k in quantized_stats if k.endswith(".weight")][0]
+ data = quantized_stats.pop(weight_key)
- data = quantized_stats.pop('weight')
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
return self
|
bitsandbytes.functional/QuantState.as_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
7a117e447e1545b0cd8717b85ee4bff502b9f60e
|
cleanup1
|
<5>:<add> 'quant_type': self.quant_type,
<6>:<add> 'blocksize': self.blocksize,
<7>:<del> 'shape': tuple(self.shape),
<9>:<del> 'blocksize': self.blocksize,
<10>:<del> 'quant_type': self.quant_type,
<11>:<add> 'shape': tuple(self.shape) if self.nested else None,
<15>:<add> 'nested_blocksize': self.state2.blocksize,
<16>:<add> 'nested_dtype': str(self.state2.dtype).strip('torch.'),
<17>:<del> 'nested_blocksize': self.state2.blocksize,
<18>:<del> 'nested_dtype': str(self.state2.dtype).strip('torch.'),
|
# module: bitsandbytes.functional
class QuantState:
def as_dict(self, packed=False):
<0> """
<1> returns dict of tensors and strings to use in serialization via _save_to_state_dict()
<2> param: packed -- returns dict[str, torch.Tensor] for state_dict
<3> """
<4> qs_dict = {
<5> 'absmax': self.absmax,
<6> 'code': self.code,
<7> 'shape': tuple(self.shape),
<8> 'dtype': str(self.dtype).strip('torch.'),
<9> 'blocksize': self.blocksize,
<10> 'quant_type': self.quant_type,
<11> }
<12> if self.nested:
<13> qs_dict.update({
<14> 'nested_absmax': self.state2.absmax,
<15> 'nested_code': self.state2.code,
<16> 'nested_offset': self.offset.item(),
<17> 'nested_blocksize': self.state2.blocksize,
<18> 'nested_dtype': str(self.state2.dtype).strip('torch.'),
<19> })
<20> if not packed:
<21> return qs_dict
<22>
<23> qs_packed_dict = {k: v for k, v in qs_dict.items() if isinstance(v, torch.Tensor)}
<24> non_tensor_dict = {k: v for k, v in qs_dict.items() if not isinstance(v, torch.Tensor)}
<25> qs_packed_dict["quant_state." + "bitsandbytes__" + self.quant_type] = pack_dict_to_tensor(non_tensor_dict)
<26> return qs_packed_dict
<27>
|
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState.__init__
self.absmax = absmax
self.shape = shape
self.code = code
self.dtype = dtype
self.blocksize = blocksize
self.quant_type = quant_type
self.offset = offset
self.state2 = state2
self.nested = state2 is not None
at: bitsandbytes.functional.QuantState.from_dict
quant_state = cls(
quant_type=qs_dict['quant_type'],
absmax=qs_dict['absmax'].to(device),
blocksize=qs_dict['blocksize'],
code=qs_dict['code'].to(device),
dtype=getattr(torch, qs_dict['dtype']),
shape=torch.Size(qs_dict['shape']),
offset=offset,
state2=state2,
)
at: bitsandbytes.functional.QuantState.to
self.absmax = self.absmax.to(device)
self.offset = self.offset.to(device)
===========changed ref 0===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
"""
unpacks dict of tensors into QuantState
where necessary, convert into strings, torch.dtype, ints, etc.
quant_state_dict may contain item with non-tensor components with key like
`...weight.quant_state.bitsandbytes__[nf4/fp4]`
it is detected with key strored in qs_key, and then unpacked
"""
# unpacking tensor with non-tensor components
qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
+ assert len(qs_key) == 1 or not qs_key and 'quant_type' in qs_dict, \
+ f"`qs_dict` must contain packed quant_state items, or be unpacked. Found keys: {tuple(qs_dict.keys())}"
if len(qs_key) == 1:
qs_key = qs_key[0]
assert 'bitsandbytes__nf4' in qs_key or 'bitsandbytes__fp4' in qs_key, \
f"invalid qs_key value {qs_key}"
qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
qs_dict = {k.split('.')[-1]:v for k, v in qs_dict.items()} # strip prefixes
if 'nested_absmax' in qs_dict:
offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
state2 = cls(
absmax=qs_dict['nested_absmax'].to(device),
+ blocksize=qs_dict['nested_blocksize'],
code=qs_dict['nested_code'].to(device),
- blocksize=qs_dict['nested_blocksize'],
dtype=getattr(torch, qs_dict['nested_dtype']),
)
</s>
===========changed ref 1===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
# offset: 1
<s>dict['nested_blocksize'],
dtype=getattr(torch, qs_dict['nested_dtype']),
)
else:
offset, state2 = None, None
quant_state = cls(
+ quant_type=qs_dict['quant_type'],
absmax=qs_dict['absmax'].to(device),
+ blocksize=qs_dict['blocksize'],
+ code=qs_dict['code'].to(device),
+ dtype=getattr(torch, qs_dict['dtype']),
shape=torch.Size(qs_dict['shape']),
- dtype=getattr(torch, qs_dict['dtype']),
- blocksize=qs_dict['blocksize'],
offset=offset,
state2=state2,
- quant_type=qs_dict['quant_type'],
- code=qs_dict['code'].to(device),
)
return quant_state
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, quantized_stats, data=None, requires_grad=False, device='cuda', **kwargs):
if data is None:
+ weight_key = [k for k in quantized_stats if k.endswith(".weight")][0]
+ data = quantized_stats.pop(weight_key)
- data = quantized_stats.pop('weight')
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
return self
|
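Putting the two sides together, the packed form produced by as_dict(packed=True) is what ends up in a state_dict. A rough sketch of its layout, with key names taken from the code above and the surrounding tensors only described in comments:

qs = quantized_param.quant_state            # QuantState produced by quantize_4bit (assumed)
packed = qs.as_dict(packed=True)
# tensor components stay as plain tensors:
#   packed["absmax"], packed["code"]
#   packed["nested_absmax"], packed["nested_code"]    (only when compress_statistics is on)
# every non-tensor item (quant_type, blocksize, dtype, shape, nested_offset, ...) is
# JSON-packed into a single uint8 tensor under one key:
#   packed["quant_state.bitsandbytes__nf4"]           # or ...__fp4, matching quant_type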
bitsandbytes.nn.modules/Params4bit.from_prequantized
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
0a0b531fc71875a1fc04f74d963baf92e916e656
|
removed optional data=None in from_prequantized
|
<0>:<del> if data is None:
<1>:<del> weight_key = [k for k in quantized_stats if k.endswith(".weight")][0]
<2>:<del> data = quantized_stats.pop(weight_key)
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
+ def from_prequantized(cls, data, quantized_stats, requires_grad=False, device='cuda', **kwargs):
- def from_prequantized(cls, quantized_stats, data=None, requires_grad=False, device='cuda', **kwargs):
<0> if data is None:
<1> weight_key = [k for k in quantized_stats if k.endswith(".weight")][0]
<2> data = quantized_stats.pop(weight_key)
<3> self = torch.Tensor._make_subclass(cls, data.to(device))
<4> self.requires_grad = requires_grad
<5> self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
<6> self.blocksize = self.quant_state.blocksize
<7> self.compress_statistics = self.quant_state.nested
<8> self.quant_type = self.quant_state.quant_type
<9> return self
<10>
|
===========unchanged ref 0===========
at: bitsandbytes.functional
QuantState(absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None)
at: bitsandbytes.functional.QuantState
from_dict(qs_dict: dict[str, Any], device: torch.device) -> 'QuantState'
at: bitsandbytes.nn.modules.Params4bit.cuda
self.data = w_4bit
|
bitsandbytes.autograd._functions/matmul_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4c11d6dcdd87788b631c3c7f059c728180059e8d
|
reverted fn signatures in functional()
|
<6>:<add> out = F.gemv_4bit(A, B.t(), out, state=quant_state)
<del> out = F.gemv_4bit(A, B.t(), out, quant_state=quant_state)
|
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: F.QuantState, out: tensor = None, bias=None):
<0> assert quant_state is not None
<1> if A.numel() == A.shape[-1] and A.requires_grad == False:
<2> if A.shape[-1] % quant_state.blocksize != 0:
<3> warn(f'Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
<4> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<5> else:
<6> out = F.gemv_4bit(A, B.t(), out, quant_state=quant_state)
<7> if bias is not None:
<8> out += bias
<9> return out
<10> else:
<11> return MatMul4Bit.apply(A, B, out, bias, quant_state)
<12>
| |
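A usage sketch for the dispatch above: a single-token, no-grad activation takes the fused gemv_4bit path, while batched or gradient-carrying inputs fall back to MatMul4Bit. Function names follow this document; shapes, dtypes and the nf4 choice are illustrative:

import math
import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

dim = 1024
W = torch.randn(4 * dim, dim, dtype=torch.float16, device="cuda") / math.sqrt(dim)
qW, qstate = F.quantize_4bit(W, quant_type="nf4")       # packed weight + QuantState

x = torch.randn(1, 1, dim, dtype=torch.float16, device="cuda")   # one token -> gemv_4bit path
y = bnb.matmul_4bit(x, qW.t(), qstate)                           # shape (1, 1, 4*dim)

xb = torch.randn(8, dim, dtype=torch.float16, device="cuda", requires_grad=True)
yb = bnb.matmul_4bit(xb, qW.t(), qstate)                         # falls back to MatMul4Bit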
bitsandbytes.functional/gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4c11d6dcdd87788b631c3c7f059c728180059e8d
|
reverted fn signatures in functional()
|
<2>:<add> if state is None:
<del> if quant_state is None:
<8>:<add> Bshape = state.shape
<del> Bshape = quant_state.shape
<10>:<add> absmax = state.absmax
<del> absmax = quant_state.absmax
<11>:<add> if state.nested:
<del> if quant_state.nested:
<12>:<add> absmax = dequantize_blockwise(state.absmax, state.state2)
<del> absmax = dequantize_blockwise(quant_state.absmax, quant_state.state2)
<13>:<add> absmax += state.offset
<del> absmax += quant_state.offset
<27>:<add> is_on_gpu([B, A, out, absmax, state.code])
<del> is_on_gpu([B, A, out, absmax, quant_state.code])
|
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
+ state=None
- quant_state=None
):
<0> prev_device = pre_call(A.device)
<1> #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
<2> if quant_state is None:
<3> raise ValueError(f'state cannot None. gem_4bit( ) requires the state from quantize_4bit( )')
<4>
<5> if A.numel() != A.shape[-1]:
<6> raise ValueError(f'Dimensions of A are invalid. Must be a vector with the leading dimensions of "1", e.g. [1, 1, 2048]')
<7>
<8> Bshape = quant_state.shape
<9> bout = Bshape[0]
<10> absmax = quant_state.absmax
<11> if quant_state.nested:
<12> absmax = dequantize_blockwise(quant_state.absmax, quant_state.state2)
<13> absmax += quant_state.offset
<14>
<15> if out is None:
<16> if len(A.shape) == 3:
<17> out = torch.empty(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
<18> else:
<19> out = torch.empty(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<20>
<21> n = 1
<22> m = Bshape[0]
<23> k = Bshape[1]
<24> lda = Bshape[0]
<25> ldc = Bshape[0]
<26> ldb = (A.shape[-1]+1)//2
<27> is_on_gpu([B, A, out, absmax, quant_state.code])
<28> m = ct.c_int32(m)
<29> n = ct.c</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
+ state=None
- quant_state=None
):
# offset: 1
k = ct.c_int32(k)
lda = ct.c_int32(lda)
ldb = ct.c_int32(ldb)
ldc = ct.c_int32(ldc)
if B.dtype == torch.uint8:
if A.dtype == torch.float16:
lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(quant_state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(quant_state.blocksize))
elif A.dtype == torch.bfloat16:
lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(quant_state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(quant_state.blocksize))
elif A.dtype == torch.float32:
lib.cgemm_4bit_inference_naive_fp32(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(quant_state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(quant_state.blocksize))
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
post_call(prev_device)
return out
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: F.QuantState, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
if A.shape[-1] % quant_state.blocksize != 0:
warn(f'Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
return MatMul4Bit.apply(A, B, out, bias, quant_state)
else:
+ out = F.gemv_4bit(A, B.t(), out, state=quant_state)
- out = F.gemv_4bit(A, B.t(), out, quant_state=quant_state)
if bias is not None:
out += bias
return out
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
|
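Note the shape restriction enforced above: A must satisfy A.numel() == A.shape[-1], i.e. a single row/token. A hedged direct call using the keyword name from this signature (`state=`); sizes are illustrative:

import torch
import bitsandbytes.functional as F

dim = 1024
W = torch.randn(dim, dim, dtype=torch.float16, device="cuda")
qW, qstate = F.quantize_4bit(W, quant_type="nf4")

x = torch.randn(1, dim, dtype=torch.float16, device="cuda")   # numel() == shape[-1]
y = F.gemv_4bit(x, qW.t(), state=qstate)                      # out shape (1, dim)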
bitsandbytes.functional/mm_dequant
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4c11d6dcdd87788b631c3c7f059c728180059e8d
|
reverted fn signatures in functional()
|
<2>:<add> out_shape = quant_state[0]
<del> out_shape = state[0]
|
# module: bitsandbytes.functional
def mm_dequant(
A,
+ quant_state,
- state,
row_stats,
col_stats,
out=None,
new_row_stats=None,
new_col_stats=None,
bias=None
):
<0> assert A.dtype == torch.int32
<1> if bias is not None: assert bias.dtype == torch.float16
<2> out_shape = state[0]
<3> if len(out_shape) == 3:
<4> out_shape = (out_shape[0] * out_shape[1], out_shape[2])
<5>
<6> if out is None:
<7> out = torch.empty(out_shape, dtype=torch.float16, device=A.device)
<8> if new_row_stats is None:
<9> new_row_stats = torch.empty(
<10> out_shape[0], dtype=torch.float32, device=A.device
<11> )
<12> if new_col_stats is None:
<13> new_col_stats = torch.empty(
<14> out_shape[1], dtype=torch.float32, device=A.device
<15> )
<16> assert (
<17> new_row_stats.shape[0] == row_stats.shape[0]
<18> ), f"{new_row_stats.shape} vs {row_stats.shape}"
<19> assert (
<20> new_col_stats.shape[0] == col_stats.shape[0]
<21> ), f"{new_col_stats.shape} vs {col_stats.shape}"
<22>
<23> prev_device = pre_call(A.device)
<24> ptrA = get_ptr(A)
<25> ptrOut = get_ptr(out)
<26> ptrRowStats = get_ptr(row_stats)
<27> ptrColStats = get_ptr(col_stats)
<28> ptrNewRowStats = get_ptr(new_row_stats)
<29> ptrNewColStats = get_ptr(new_col_stats)
<30> ptrBias = get</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def mm_dequant(
A,
+ quant_state,
- state,
row_stats,
col_stats,
out=None,
new_row_stats=None,
new_col_stats=None,
bias=None
):
# offset: 1
numRows = ct.c_int32(out_shape[0])
numCols = ct.c_int32(out_shape[1])
is_on_gpu([A, row_stats, col_stats, out, new_row_stats, new_col_stats, bias])
lib.cdequant_mm_int32_fp16(ptrA, ptrRowStats, ptrColStats, ptrOut, ptrNewRowStats, ptrNewColStats, ptrBias, numRows, numCols)
post_call(prev_device)
return out
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
def matmul_4bit(A: tensor, B: tensor, quant_state: F.QuantState, out: tensor = None, bias=None):
assert quant_state is not None
if A.numel() == A.shape[-1] and A.requires_grad == False:
if A.shape[-1] % quant_state.blocksize != 0:
warn(f'Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}')
return MatMul4Bit.apply(A, B, out, bias, quant_state)
else:
+ out = F.gemv_4bit(A, B.t(), out, state=quant_state)
- out = F.gemv_4bit(A, B.t(), out, quant_state=quant_state)
if bias is not None:
out += bias
return out
else:
return MatMul4Bit.apply(A, B, out, bias, quant_state)
===========changed ref 1===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
+ state=None
- quant_state=None
):
prev_device = pre_call(A.device)
#sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
+ if state is None:
- if quant_state is None:
raise ValueError(f'state cannot None. gem_4bit( ) requires the state from quantize_4bit( )')
if A.numel() != A.shape[-1]:
raise ValueError(f'Dimensions of A are invalid. Must be a vector with the leading dimensions of "1", e.g. [1, 1, 2048]')
+ Bshape = state.shape
- Bshape = quant_state.shape
bout = Bshape[0]
+ absmax = state.absmax
- absmax = quant_state.absmax
+ if state.nested:
- if quant_state.nested:
+ absmax = dequantize_blockwise(state.absmax, state.state2)
- absmax = dequantize_blockwise(quant_state.absmax, quant_state.state2)
+ absmax += state.offset
- absmax += quant_state.offset
if out is None:
if len(A.shape) == 3:
out = torch.empty(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
else:
out = torch.empty(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
n = 1
m = Bshape[0]
k = Bshape[1]
lda = Bshape[0]
ldc = Bshape[0]
ldb = (A.shape[-1]+1)//2</s>
===========changed ref 2===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
+ state=None
- quant_state=None
):
# offset: 1
<s>shape[0]
ldc = Bshape[0]
ldb = (A.shape[-1]+1)//2
+ is_on_gpu([B, A, out, absmax, state.code])
- is_on_gpu([B, A, out, absmax, quant_state.code])
m = ct.c_int32(m)
n = ct.c_int32(n)
k = ct.c_int32(k)
lda = ct.c_int32(lda)
ldb = ct.c_int32(ldb)
ldc = ct.c_int32(ldc)
if B.dtype == torch.uint8:
if A.dtype == torch.float16:
+ lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(state.blocksize))
- lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(quant_state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(quant_state.blocksize))
elif A.dtype == torch.bfloat16:
+ lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(</s>
===========changed ref 3===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
+ state=None
- quant_state=None
):
# offset: 2
<s> get_ptr(absmax), get_ptr(state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(state.blocksize))
- lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(quant_state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(quant_state.blocksize))
elif A.dtype == torch.float32:
+ lib.cgemm_4bit_inference_naive_fp32(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(state.blocksize))
- lib.cgemm_4bit_inference_naive_fp32(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(quant_state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(quant_state.blocksize))
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
post_call(prev_device)
return out
|
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4c11d6dcdd87788b631c3c7f059c728180059e8d
|
reverted fn signatures in functional()
|
<s>4', 'fp4'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
<0> for dim in [128, 256, 512, 1024]:
<1> #for dim in [4*1024]:
<2> #for dim in [1*16]:
<3> errs1 = []
<4> errs2 = []
<5> errs3 = []
<6> relerrs1 = []
<7> relerrs2 = []
<8> relerrs3 = []
<9> max_errs1 = []
<10> max_errs2 = []
<11> max_errs3 = []
<12>
<13>
<14> for i in range(100):
<15> if kind == 'fc1':
<16> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<17> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<18> elif kind == 'fc2':
<19> A = torch.randn(1, 4*dim, dtype=dtype, device='cuda')
<20> B = torch.randn(dim, 4*dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<21> elif kind == 'attn':
<22> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<23> B = torch.randn(dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<24> elif kind == 'attn_packed':
<25> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<26> B = torch.randn(dim*3, dim, dtype=dtype, device</s>
|
===========below chunk 0===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 1
qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=double_quant)
C3 = torch.matmul(A, B.t())
C2 = F.gemv_4bit(A, qB.t(), quant_state=state)
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
err1 = (C1-C2).abs().float()
err2 = (C3-C2).abs().float()
err3 = (C3-C1).abs().float()
mag1 = torch.abs(C1).float()+1e-5
mag2 = torch.abs(C3).float()+1e-5
mag3 = torch.abs(C3).float()+1e-5
relerr1 = err1/mag1
relerr2 = err2/mag2
relerr3 = err3/mag3
max_err1 = err1.max()
max_err2 = err2.max()
max_err3 = err3.max()
errs1.append(err1.mean().item())
errs2.append(err2.mean().item())
errs3.append(err3.mean().item())
relerrs1.append(relerr1.mean().item())
relerrs2.append(relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
</s>
===========below chunk 1===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 2
<s>append(relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_errs1.append(max_err1.item())
max_errs2.append(max_err2.item())
max_errs3.append(max_err3.item())
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
err1 = sum(errs1)/len(errs1)/math.sqrt(dim)
err2 = sum(errs2)/len(errs2)/math.sqrt(dim)
err3 = sum(errs3)/len(errs3)/math.sqrt(dim)
relerr1 = sum(relerrs1)/len(relerrs1)/math.sqrt(dim)
relerr2 = sum(relerrs2)/len(relerrs2)/math.sqrt(dim)
relerr3 = sum(relerrs3)/len(relerrs3)/math.sqrt(dim)
maxerr1 = sum(max_errs1)/len(max_errs1)/math.sqrt(dim)
maxerr2 = sum(max_errs2)/len(max_errs2)/math.sqrt(dim)
maxerr3 = sum</s>
===========below chunk 2===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 3
<s>_errs3)/len(max_errs3)/math.sqrt(dim)
absratio = err2/err3
relratio = relerr2/relerr3
maxratio = relerr2/relerr3
# for debugging if the tests fails
#
#print('='*80)
#print(f'For matmul: {A.shape}, {B.shape}, {kind}, {dtype}, {storage_type}, double_quant={double_quant}:')
print(C1.flatten()[-20:])
print(C2.flatten()[-20:])
print(f'inference vs training abs: {err1}')
print(f'inference vs training rel: {relerr1}')
print(f'inference vs training max: {maxerr1}')
#print(f'inference vs training vs torch err ratio abs: {absratio}')
#print(f'inference vs training vs torch err ratio rel: {relratio}')
#print(f'inference vs training vs torch err ratio max: {maxratio}')
if dtype == torch.float16:
if dim <= 512:
assert err1 < 7e-5
assert relerr1 < 0.0008
else:
assert err1 < 6e-5
assert relerr1 < 2e-4
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.005</s>
===========below chunk 3===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 4
<s>ratio > 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.float32:
if dim <= 512:
assert err1 < 5e-8
assert relerr1 < 1e-6
assert maxerr1 < 1e-7
else:
assert err1 < 5e-8
assert relerr1 < 8e-6
assert maxerr1 < 1e-7
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.005 and relratio > 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.bfloat16:
if dim <= 512:
assert err1 < 6e-4
assert relerr1 < 0.007
assert maxerr1 < 0.015
else:
assert err1 < 2e-4
assert relerr1 < 0.002
assert maxerr1 < 0.0012
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.04 and relratio > 0.96
assert maxratio < 1.02 and maxratio > 0.98
|
|
tests.test_linear4bit/test_linear4_state_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
965fd5d530e3d7bf033aac73abcf189f1a19fea0
|
test update
|
# module: tests.test_linear4bit
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize(
"quant_type, compress_statistics, bias",
list(product(["nf4", "fp4"], [False, True], [False, True])),
)
def test_linear4_state_dict(quant_type, compress_statistics, bias):
<0> original_dtype = torch.float16
<1> compute_dtype = None
<2> device = "cuda"
<3> layer_shape = (300, 400)
<4>
<5> linear = torch.nn.Linear(*layer_shape, dtype=original_dtype) # original layer
<6>
<7> # Quantizing original layer
<8> linear_q = bnb.nn.Linear4bit(
<9> linear.in_features,
<10> linear.out_features,
<11> bias=bias,
<12> compute_dtype=compute_dtype,
<13> compress_statistics=compress_statistics,
<14> quant_type=quant_type,
<15> device=device,
<16> )
<17> new_weight = bnb.nn.Params4bit(data=linear.weight, requires_grad=False)
<18> linear_q.weight = new_weight.to(device)
<19> if bias:
<20> linear_q.bias.data = linear.bias.data.to(device)
<21>
<22> sd = linear_q.state_dict()
<23>
<24> # restoring from state_dict:
<25>
<26> sd = linear_q.state_dict()
<27> bias_data2 = sd.pop("bias", None)
<28> weight_data2 = sd.pop("weight")
<29>
<30> weight2 = bnb.nn.Params4bit.from_prequantized(quantized_stats=sd, data=weight_data2)
<31>
<32> linear_q2 = bnb.nn.Linear4bit(
<33> linear.in_features,
<34> linear.out_features,
<35> bias=bias,
<36> compute_dtype=compute_dtype</s>
|
===========below chunk 0===========
# module: tests.test_linear4bit
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize(
"quant_type, compress_statistics, bias",
list(product(["nf4", "fp4"], [False, True], [False, True])),
)
def test_linear4_state_dict(quant_type, compress_statistics, bias):
# offset: 1
compress_statistics=compress_statistics,
quant_type=quant_type,
device=device,
)
linear_q2.weight = weight2.to(device)
if bias:
linear_q2.bias.data = bias_data2
# matching
a, b = linear_q.weight, linear_q2.weight
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
q0 = a.quant_state
q1 = b.quant_state
for attr in ('code', 'dtype', 'blocksize', 'absmax'):
c, d = getattr(q0, attr), getattr(q1, attr)
if isinstance(c, torch.Tensor):
assert torch.equal(c, d)
else:
assert c == d, f"{c} != {d}"
if q0.state2 is not None:
for attr in ('code', 'dtype', 'blocksize', 'absmax'):
c, d = getattr(q0.state2, attr), getattr(q1.state2, attr)
if isinstance(c, torch.Tensor):
assert torch.equal(c, d)
else:
assert c == d, f"{c} != {d}"
if bias:
a, b = linear_q.bias, linear_q2.bias
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
# Forward test
x = torch.rand(42,</s>
===========below chunk 1===========
# module: tests.test_linear4bit
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize(
"quant_type, compress_statistics, bias",
list(product(["nf4", "fp4"], [False, True], [False, True])),
)
def test_linear4_state_dict(quant_type, compress_statistics, bias):
# offset: 2
<s> b.dtype
assert torch.equal(a, b)
# Forward test
x = torch.rand(42, linear_q.shape[-1], device=device)
a = linear_q(x)
b = linear_q2(x)
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
# Saved size ratio test. Target set for layer_shape == (300, 400) w/ bias
with TemporaryDirectory() as tmpdir:
state_path_4bit = os.path.join(tmpdir, "state_4bit.pth")
state_path = os.path.join(tmpdir, "state.pth")
torch.save(linear.state_dict(), state_path)
torch.save(linear_q.state_dict(), state_path_4bit)
size_orig, size_4 = os.path.getsize(state_path), os.path.getsize(
state_path_4bit
)
size_ratio = size_4 / size_orig
target_compression = 0.143 if original_dtype == torch.float32 else 0.285
ratio_error_msg = f"quantized_size {size_4:,} is larger on disk than {target_compression:.2%} of original size {size_orig:,}"
assert size_ratio < target_compression, ratio_error_msg
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.nn.modules
Params4bit(data: Tensor=..., requires_grad: builtins.bool=...)
Linear4bit(input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None)
at: bitsandbytes.nn.modules.Linear4bit.__init__
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
at: bitsandbytes.nn.modules.Params4bit
from_prequantized(data, quantized_stats, requires_grad=False, device='cuda', **kwargs)
to(device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=...) -> T
to(tensor: Tensor, non_blocking: bool=...) -> T
to(dtype: Union[dtype, str], non_blocking: bool=...) -> T
at: bitsandbytes.nn.modules.Params4bit.cuda
self.quant_state = quant_state
===========unchanged ref 1===========
at: itertools
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5], iter6: Iterable[_T6]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5, _T6]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3]) -> Iterator[Tuple[_T1, _T2, _T3]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4]) -> Iterator[Tuple[_T1, _T2, _T3, _T4]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2]) -> Iterator[Tuple[_T1, _T2]]
product(*iterables: Iterable[_T1], repeat: int) -> Iterator[Tuple[_T1, ...]]
product(iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]]
product(*iterables: Iterable[Any], repeat: int=...) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[Any], iter2: Iterable[Any], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], iter7: Iterable[Any], *iterables: Iterable[Any]) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5]]
at: os.path
join(a: StrPath, *paths: StrPath) -> str
join(a: BytesPath, *paths: BytesPath) -> bytes
getsize(filename: AnyPath) -> int
===========unchanged ref 2===========
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: torch._C
float32: dtype = ...
float16: dtype = ...
at: torch._C._VariableFunctions
equal(input: Tensor, other: Tensor) -> _bool
|
|
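A condensed sketch of the round trip this test exercises: quantize an fp16 layer into Linear4bit, save its state_dict, and check the on-disk compression. The construction and the 0.285 target follow the test; file paths are illustrative:

import os
import torch
import bitsandbytes as bnb

linear = torch.nn.Linear(400, 300, dtype=torch.float16)

linear_q = bnb.nn.Linear4bit(400, 300, bias=True, quant_type="nf4", device="cuda")
linear_q.weight = bnb.nn.Params4bit(data=linear.weight, requires_grad=False).to("cuda")
linear_q.bias.data = linear.bias.data.to("cuda")

torch.save(linear.state_dict(), "state.pth")
torch.save(linear_q.state_dict(), "state_4bit.pth")
ratio = os.path.getsize("state_4bit.pth") / os.path.getsize("state.pth")
assert ratio < 0.285        # target compression for an fp16 original, as in the test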
bitsandbytes.functional/QuantState.from_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
76b40a5c9ae708db98e8b4a13249b2806601a387
|
save/load via state_dict now
|
<1>:<add> unpacks components of state_dict into QuantState
<del> unpacks dict of tensors into QuantState
<4>:<add> qs_dict: based on state_dict, with only relevant keys, striped of prefixes.
<add>
<add> item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.
<del> quant_state_dict may contain item with non-tensor components with key like
<5>:<del> `...weight.quant_state.bitsandbytes__[nf4/fp4]`
<6>:<del> it is detected with key strored in qs_key, and then unpacked
<10>:<add> qs_key = [k for k, v in qs_dict.items() if k in cls.valid_qs_type_keys and isinstance(v, torch.Tensor)]
<del> qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
<11>:<add> if not len(qs_key) and 'quant_type' not in qs_dict:
<add> raise ValueError("Expected packed or unpacked quant_state items, found neither")
<add> elif len(qs_key) != 1:
<add> raise ValueError(f"There should be exaclly one quant_state item with key from {self.valid_qs_type_keys}. Detected {len(qs_ley)} such items")
<add>
<add> # unpacking minor and non-tensor quant state items if necessary
<del> assert len(qs_key) == 1 or not qs_key and 'quant_type' in qs_dict, \
<12>:<del> f"`qs_dict` must contain packed quant_state items, or be unpacked. Found keys: {tuple(qs_dict.keys())}"
|
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
<0> """
<1> unpacks dict of tensors into QuantState
<2> where necessary, convert into strings, torch.dtype, ints, etc.
<3>
<4> quant_state_dict may contain item with non-tensor components with key like
<5> `...weight.quant_state.bitsandbytes__[nf4/fp4]`
<6> it is detected with key strored in qs_key, and then unpacked
<7> """
<8>
<9> # unpacking tensor with non-tensor components
<10> qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
<11> assert len(qs_key) == 1 or not qs_key and 'quant_type' in qs_dict, \
<12> f"`qs_dict` must contain packed quant_state items, or be unpacked. Found keys: {tuple(qs_dict.keys())}"
<13> if len(qs_key) == 1:
<14> qs_key = qs_key[0]
<15> assert 'bitsandbytes__nf4' in qs_key or 'bitsandbytes__fp4' in qs_key, \
<16> f"invalid qs_key value {qs_key}"
<17> qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
<18>
<19> qs_dict = {k.split('.')[-1]:v for k, v in qs_dict.items()} # strip prefixes
<20>
<21> if 'nested_absmax' in qs_dict:
<22> offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
<23> state2 = cls(
<24> absmax=qs_dict['nested_absmax'].to(device),
<25> blocksize=qs_dict['nested_blocksize'],
<26> code=qs_dict['nested_code'].to(device),
<27> dtype=getattr(torch,</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
# offset: 1
)
else:
offset, state2 = None, None
quant_state = cls(
quant_type=qs_dict['quant_type'],
absmax=qs_dict['absmax'].to(device),
blocksize=qs_dict['blocksize'],
code=qs_dict['code'].to(device),
dtype=getattr(torch, qs_dict['dtype']),
shape=torch.Size(qs_dict['shape']),
offset=offset,
state2=state2,
)
return quant_state
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"quant_state.bitsandbytes__{x}" for x in valid_quant_types]
valid_qs_keys = ['absmax', 'code', 'nested_absmax', 'nested_code', 'quant_state',
'quant_type', 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
at: bitsandbytes.functional.QuantState.__init__
self.absmax = absmax
self.shape = shape
self.dtype = dtype
self.blocksize = blocksize
self.quant_type = quant_type
self.offset = offset
self.state2 = state2
self.nested = state2 is not None
at: bitsandbytes.functional.QuantState.to
self.absmax = self.absmax.to(device)
self.offset = self.offset.to(device)
at: bitsandbytes.utils
unpack_tensor_to_dict(tensor_data)
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
at: torch._C._VariableFunctions
tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
===========changed ref 0===========
# module: bitsandbytes.functional
class QuantState:
+ """container for quantization state components to work with Params4bit and similar clases"""
- """container for quantizationstate components to work with Params4bit and similar clases"""
+ valid_quant_types = ('fp4', 'nf4')
+ valid_qs_type_keys = [f"quant_state.bitsandbytes__{x}" for x in valid_quant_types]
+ valid_qs_keys = ['absmax', 'code', 'nested_absmax', 'nested_code', 'quant_state',
+ 'quant_type', 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
-
- @classmethod
- def from_prequantized(cls, data, quantized_stats, requires_grad=False, device='cuda', **kwargs):
- self = torch.Tensor._make_subclass(cls, data.to(device))
- self.requires_grad = requires_grad
- self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
- self.blocksize = self.quant_state.blocksize
- self.compress_statistics = self.quant_state.nested
- self.quant_type = self.quant_state.quant_type
- return self
-
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+ missing_keys, unexpected_keys, error_msgs):
+ # Note: super()._load_from_state_dict() is not called here intentionally.
+ if self.bias is not None:
+ bias_data = state_dict.pop(prefix + "bias", None)
+ self.bias.data = bias_data.to(self.bias.data.device)
+
+ self.weight, state_dict = bnb.nn.Params4bit.from_state_dict(
+ state_dict, prefix=prefix + "weight" + ".", requires_grad=False
+ )
+ unexpected_keys.extend(state_dict.keys())
+
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+
+ @classmethod
+ def from_state_dict(cls, state_dict, prefix="", requires_grad=False):
+ data = state_dict.pop(prefix.rstrip('.'))
+
+ # extracting components for QuantState from state_dict
+ qs_dict = {}
+ for k, v in state_dict.items():
+ if k.replace(prefix, '').split('.')[0] in QuantState.valid_qs_keys:
+ qs_dict[k] = v
+ state_dict = {k: v for k, v in state_dict.items() if k not in qs_dict}
+ qs_dict = {k.replace(prefix, ''): v for k, v in qs_dict.items()}
+
+ if data.device.type != "cuda":
+ raise ValueError(f"`data.device.type` must be 'cuda', detected {data.device.type}")
+
+ cls.requires_grad = requires_grad,
+ cls.quant_state = QuantState.from_dict(qs_dict=qs_dict, device=data.device)
+
+ self = torch.Tensor._make_subclass(cls, data=data.to(data.device))
+ return self, state_dict
+
|
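With the _load_from_state_dict override added in this commit, restoring can go through the regular module API. A hedged sketch, assuming a checkpoint that was saved from a Linear4bit living on CUDA (Params4bit.from_state_dict rejects non-CUDA data):

import torch
import bitsandbytes as bnb

linear_q2 = bnb.nn.Linear4bit(400, 300, bias=True, quant_type="nf4", device="cuda")
sd = torch.load("state_4bit.pth")     # tensors come back on CUDA because they were saved from CUDA
linear_q2.load_state_dict(sd)         # routes into Params4bit.from_state_dict via the override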
bitsandbytes.functional/QuantState.from_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
851806e023c428df63e30f4c29a181306d4cbd7c
|
renamed code to `quant_map` in serialized QState
|
<26>:<add> code=qs_dict['nested_quant_map'].to(device),
<del> code=qs_dict['nested_code'].to(device),
|
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
<0> """
<1> unpacks components of state_dict into QuantState
<2> where necessary, convert into strings, torch.dtype, ints, etc.
<3>
<4> qs_dict: based on state_dict, with only relevant keys, striped of prefixes.
<5>
<6> item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.
<7> """
<8>
<9> # unpacking tensor with non-tensor components
<10> qs_key = [k for k, v in qs_dict.items() if k in cls.valid_qs_type_keys and isinstance(v, torch.Tensor)]
<11> if not len(qs_key) and 'quant_type' not in qs_dict:
<12> raise ValueError("Expected packed or unpacked quant_state items, found neither")
<13> elif len(qs_key) != 1:
<14> raise ValueError(f"There should be exaclly one quant_state item with key from {self.valid_qs_type_keys}. Detected {len(qs_ley)} such items")
<15>
<16> # unpacking minor and non-tensor quant state items if necessary
<17> if len(qs_key) == 1:
<18> qs_key = qs_key[0]
<19> qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
<20>
<21> if 'nested_absmax' in qs_dict:
<22> offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
<23> state2 = cls(
<24> absmax=qs_dict['nested_absmax'].to(device),
<25> blocksize=qs_dict['nested_blocksize'],
<26> code=qs_dict['nested_code'].to(device),
<27> dtype=getattr(torch, qs_dict['nested_dtype']),
<28> )
<29> else:
<30> </s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
# offset: 1
quant_state = cls(
quant_type=qs_dict['quant_type'],
absmax=qs_dict['absmax'].to(device),
blocksize=qs_dict['blocksize'],
code=qs_dict['code'].to(device),
dtype=getattr(torch, qs_dict['dtype']),
shape=torch.Size(qs_dict['shape']),
offset=offset,
state2=state2,
)
return quant_state
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"quant_state.bitsandbytes__{x}" for x in valid_quant_types]
valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state',
'quant_type', 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
at: bitsandbytes.utils
unpack_tensor_to_dict(tensor_data)
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
Size()
at: torch._C._VariableFunctions
tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
===========changed ref 0===========
# module: bitsandbytes.functional
class QuantState:
"""container for quantization state components to work with Params4bit and similar clases"""
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"quant_state.bitsandbytes__{x}" for x in valid_quant_types]
+ valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state',
- valid_qs_keys = ['absmax', 'code', 'nested_absmax', 'nested_code', 'quant_state',
'quant_type', 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
|
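The packed-item keys recognized after this rename can be read straight off valid_quant_types; a small check, with the expected output shown as a comment:

from bitsandbytes.functional import QuantState

print(QuantState.valid_qs_type_keys)
# ['quant_state.bitsandbytes__fp4', 'quant_state.bitsandbytes__nf4']
# from_dict expects exactly one of these packed tensor items in qs_dict, or an
# already-unpacked dict that carries a 'quant_type' entry; anything else raises ValueError.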