path stringlengths 9–117 | type stringclasses 2 values | project stringclasses 10 values | commit_hash stringlengths 40 | commit_message stringlengths 1–137 | ground_truth stringlengths 0–2.74k | main_code stringlengths 102–3.37k | context stringlengths 0–14.7k |
---|---|---|---|---|---|---|---|
bitsandbytes.functional/QuantState.as_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
851806e023c428df63e30f4c29a181306d4cbd7c
|
renamed code to `quant_map` in serialized QState
|
<8>:<add> 'quant_map': self.code,
<del> 'code': self.code,
<16>:<add> 'nested_quant_map': self.state2.code,
<del> 'nested_code': self.state2.code,
|
# module: bitsandbytes.functional
class QuantState:
def as_dict(self, packed=False):
<0> """
<1> returns dict of tensors and strings to use in serialization via _save_to_state_dict()
<2> param: packed -- returns dict[str, torch.Tensor] for state_dict
<3> """
<4> qs_dict = {
<5> 'quant_type': self.quant_type,
<6> 'absmax': self.absmax,
<7> 'blocksize': self.blocksize,
<8> 'code': self.code,
<9> 'dtype': str(self.dtype).strip('torch.'),
<10> 'shape': tuple(self.shape) if self.nested else None,
<11> }
<12> if self.nested:
<13> qs_dict.update({
<14> 'nested_absmax': self.state2.absmax,
<15> 'nested_blocksize': self.state2.blocksize,
<16> 'nested_code': self.state2.code,
<17> 'nested_dtype': str(self.state2.dtype).strip('torch.'),
<18> 'nested_offset': self.offset.item(),
<19> })
<20> if not packed:
<21> return qs_dict
<22>
<23> qs_packed_dict = {k: v for k, v in qs_dict.items() if isinstance(v, torch.Tensor)}
<24> non_tensor_dict = {k: v for k, v in qs_dict.items() if not isinstance(v, torch.Tensor)}
<25> qs_packed_dict["quant_state." + "bitsandbytes__" + self.quant_type] = pack_dict_to_tensor(non_tensor_dict)
<26> return qs_packed_dict
<27>
|
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState.__init__
self.absmax = absmax
self.shape = shape
self.code = code
self.dtype = dtype
self.blocksize = blocksize
self.quant_type = quant_type
self.offset = offset
self.state2 = state2
self.nested = state2 is not None
at: bitsandbytes.functional.QuantState.to
self.absmax = self.absmax.to(device)
self.offset = self.offset.to(device)
at: bitsandbytes.utils
pack_dict_to_tensor(source_dict)
===========changed ref 0===========
# module: bitsandbytes.functional
class QuantState:
"""container for quantization state components to work with Params4bit and similar clases"""
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"quant_state.bitsandbytes__{x}" for x in valid_quant_types]
+ valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state',
- valid_qs_keys = ['absmax', 'code', 'nested_absmax', 'nested_code', 'quant_state',
'quant_type', 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
===========changed ref 1===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
"""
unpacks components of state_dict into QuantState
where necessary, convert into strings, torch.dtype, ints, etc.
qs_dict: based on state_dict, with only relevant keys, striped of prefixes.
item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.
"""
# unpacking tensor with non-tensor components
qs_key = [k for k, v in qs_dict.items() if k in cls.valid_qs_type_keys and isinstance(v, torch.Tensor)]
if not len(qs_key) and 'quant_type' not in qs_dict:
raise ValueError("Expected packed or unpacked quant_state items, found neither")
elif len(qs_key) != 1:
raise ValueError(f"There should be exaclly one quant_state item with key from {self.valid_qs_type_keys}. Detected {len(qs_ley)} such items")
# unpacking minor and non-tensor quant state items if necessary
if len(qs_key) == 1:
qs_key = qs_key[0]
qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
if 'nested_absmax' in qs_dict:
offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
state2 = cls(
absmax=qs_dict['nested_absmax'].to(device),
blocksize=qs_dict['nested_blocksize'],
+ code=qs_dict['nested_quant_map'].to(device),
- code=qs_dict['nested_code'].to(device),
dtype=getattr(torch, qs_dict['nested_dtype']),
)
else:
offset, state2 = None, None
</s>
===========changed ref 2===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> 'QuantState':
# offset: 1
<s>ch, qs_dict['nested_dtype']),
)
else:
offset, state2 = None, None
quant_state = cls(
quant_type=qs_dict['quant_type'],
absmax=qs_dict['absmax'].to(device),
blocksize=qs_dict['blocksize'],
+ code=qs_dict['quant_map'].to(device),
- code=qs_dict['code'].to(device),
dtype=getattr(torch, qs_dict['dtype']),
shape=torch.Size(qs_dict['shape']),
offset=offset,
state2=state2,
)
return quant_state
|
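A hedged usage sketch of the key rename above, mirroring `tests.test_linear4bit` further below (layer sizes are illustrative and a CUDA device is assumed): the serialized quantization map now travels under `quant_map` instead of `code`, and the packed form keeps everything as tensors.

```python
import torch
import bitsandbytes as bnb

linear = torch.nn.Linear(300, 400, dtype=torch.float16)
linear_q = bnb.nn.Linear4bit(300, 400, bias=False, quant_type="nf4", device="cuda")
# quantization happens when the fp16 weight is moved to the GPU
linear_q.weight = bnb.nn.Params4bit(data=linear.weight, requires_grad=False).to("cuda")

qs_dict = linear_q.weight.quant_state.as_dict()
assert "quant_map" in qs_dict and "code" not in qs_dict        # key renamed by this commit

# packed form: only tensors, plus one packed entry holding the non-tensor items
packed = linear_q.weight.quant_state.as_dict(packed=True)
assert any(k.startswith("quant_state.bitsandbytes__") for k in packed)
assert all(isinstance(v, torch.Tensor) for v in packed.values())
```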
bitsandbytes.nn.modules/Params4bit.from_state_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
7d1c9cfed2bc6ec76411544ba7c540e1d3510053
|
extra feats in constructor Params4bit
|
<13>:<add> cls.requires_grad = requires_grad
<del> cls.requires_grad = requires_grad,
<15>:<add> cls.blocksize = cls.quant_state.blocksize
<add> cls.compress_statistics = cls.quant_state.nested
<add> cls.quant_type = cls.quant_state.quant_type
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_state_dict(cls, state_dict, prefix="", requires_grad=False):
<0> data = state_dict.pop(prefix.rstrip('.'))
<1>
<2> # extracting components for QuantState from state_dict
<3> qs_dict = {}
<4> for k, v in state_dict.items():
<5> if k.replace(prefix, '').split('.')[0] in QuantState.valid_qs_keys:
<6> qs_dict[k] = v
<7> state_dict = {k: v for k, v in state_dict.items() if k not in qs_dict}
<8> qs_dict = {k.replace(prefix, ''): v for k, v in qs_dict.items()}
<9>
<10> if data.device.type != "cuda":
<11> raise ValueError(f"`data.device.type` must be 'cuda', detected {data.device.type}")
<12>
<13> cls.requires_grad = requires_grad,
<14> cls.quant_state = QuantState.from_dict(qs_dict=qs_dict, device=data.device)
<15>
<16> self = torch.Tensor._make_subclass(cls, data=data.to(data.device))
<17> return self, state_dict
<18>
|
===========unchanged ref 0===========
at: bitsandbytes.functional
QuantState(absmax, shape=None, code=None, blocksize=None, quant_type=None, dtype=None, offset=None, state2=None)
at: bitsandbytes.functional.QuantState
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"quant_state.bitsandbytes__{x}" for x in valid_quant_types]
valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state',
'quant_type', 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
from_dict(qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState'
|
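A side note on the one-character bug fixed in this commit: a trailing comma turns an assignment into a 1-tuple, so `requires_grad` silently became truthy regardless of the value passed. A minimal illustration:

```python
requires_grad = False

wrong = requires_grad,      # trailing comma builds a tuple: (False,)
right = requires_grad       # plain bool: False

assert wrong == (False,) and bool(wrong) is True   # a non-empty tuple is always truthy
assert right is False
```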
bitsandbytes.functional/QuantState.from_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
c6d0a847f13a31bcb5ba365bee9a456c40d7501d
|
cleanup 0
|
<5>:<add>
<del>
<14>:<add> raise ValueError(f"There should be exaclly one quant_state item with key from {cls.valid_qs_type_keys}. Detected {len(qs_key)} such items")
<del> raise ValueError(f"There should be exaclly one quant_state item with key from {self.valid_qs_type_keys}. Detected {len(qs_ley)} such items")
<15>:<add>
<del>
|
# module: bitsandbytes.functional
class QuantState:
-
@classmethod
def from_dict(cls, qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState':
<0> """
<1> unpacks components of state_dict into QuantState
<2> where necessary, convert into strings, torch.dtype, ints, etc.
<3>
<4> qs_dict: based on state_dict, with only relevant keys, striped of prefixes.
<5>
<6> item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.
<7> """
<8>
<9> # unpacking tensor with non-tensor components
<10> qs_key = [k for k, v in qs_dict.items() if k in cls.valid_qs_type_keys and isinstance(v, torch.Tensor)]
<11> if not len(qs_key) and 'quant_type' not in qs_dict:
<12> raise ValueError("Expected packed or unpacked quant_state items, found neither")
<13> elif len(qs_key) != 1:
<14> raise ValueError(f"There should be exaclly one quant_state item with key from {self.valid_qs_type_keys}. Detected {len(qs_ley)} such items")
<15>
<16> # unpacking minor and non-tensor quant state items if necessary
<17> if len(qs_key) == 1:
<18> qs_key = qs_key[0]
<19> qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
<20>
<21> if 'nested_absmax' in qs_dict:
<22> offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
<23> state2 = cls(
<24> absmax=qs_dict['nested_absmax'].to(device),
<25> blocksize=qs_dict['nested_blocksize'],
<26> code=qs_dict['nested_quant_map'].to(device),
<27> dtype=getattr(torch, qs_dict['nested_dtype']),
<28> )
<29> else</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
class QuantState:
-
@classmethod
def from_dict(cls, qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState':
# offset: 1
offset, state2 = None, None
quant_state = cls(
quant_type=qs_dict['quant_type'],
absmax=qs_dict['absmax'].to(device),
blocksize=qs_dict['blocksize'],
code=qs_dict['quant_map'].to(device),
dtype=getattr(torch, qs_dict['dtype']),
shape=torch.Size(qs_dict['shape']),
offset=offset,
state2=state2,
)
return quant_state
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"quant_state.bitsandbytes__{x}" for x in valid_quant_types]
valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state',
'quant_type', 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
at: bitsandbytes.utils
unpack_tensor_to_dict(tensor_data)
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
Size()
at: torch._C._VariableFunctions
tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
|
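The "cleanup 0" commit above also repairs the error path of the classmethod: `self` and `qs_ley` are undefined names there, so hitting that branch raised `NameError` instead of the intended `ValueError`. A hypothetical reduction of the problem:

```python
class Demo:
    valid_keys = ["a", "b"]

    @classmethod
    def broken(cls):
        # referencing `self` inside a classmethod raises NameError, masking the real error
        raise ValueError(f"expected one of {self.valid_keys}")

    @classmethod
    def fixed(cls):
        raise ValueError(f"expected one of {cls.valid_keys}")

try:
    Demo.broken()
except NameError as e:      # not the ValueError we wanted to raise
    print("masked:", e)

try:
    Demo.fixed()
except ValueError as e:
    print("intended:", e)
```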
bitsandbytes.functional/QuantState.from_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
781fcd5b610efcc090544fdf61f69b7889e4a09d
|
partially reverted 76b40a5
|
<10>:<add> qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
<del> qs_key = [k for k, v in qs_dict.items() if k in cls.valid_qs_type_keys and isinstance(v, torch.Tensor)]
<12>:<add> raise ValueError("Expected packed or unpacked quant_state items, found neither")
<del> raise ValueError("Expected packed or unpacked quant_state items, found neither")
<13>:<add> elif len(qs_key) != 1 or qs_key[0].split(".")[-1] not in cls.valid_qs_type_keys:
<del> elif len(qs_key) != 1:
<14>:<add> raise ValueError(f"There should be exactly one `quant_state` item with ending from {cls.valid_qs_type_keys}.\nDetected {qs_key}.")
<del> raise ValueError(f"There should be exaclly one quant_state item with key from {cls.valid_qs_type_keys}. Detected {len(qs_key)} such items")
<19>:<add> qs_dict.update(unpack_tensor_to_dict(qs_dict.pop(qs_key)))
<del> qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
<20>:<add>
<add> qs_dict = {k.split('.')[-1]: v for k, v in qs_dict.items()} # strip prefixes
<add> assert set(qs_dict.keys()).issubset(cls.valid_qs_keys)
|
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState':
<0> """
<1> unpacks components of state_dict into QuantState
<2> where necessary, convert into strings, torch.dtype, ints, etc.
<3>
<4> qs_dict: based on state_dict, with only relevant keys, striped of prefixes.
<5>
<6> item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.
<7> """
<8>
<9> # unpacking tensor with non-tensor components
<10> qs_key = [k for k, v in qs_dict.items() if k in cls.valid_qs_type_keys and isinstance(v, torch.Tensor)]
<11> if not len(qs_key) and 'quant_type' not in qs_dict:
<12> raise ValueError("Expected packed or unpacked quant_state items, found neither")
<13> elif len(qs_key) != 1:
<14> raise ValueError(f"There should be exaclly one quant_state item with key from {cls.valid_qs_type_keys}. Detected {len(qs_key)} such items")
<15>
<16> # unpacking minor and non-tensor quant state items if necessary
<17> if len(qs_key) == 1:
<18> qs_key = qs_key[0]
<19> qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
<20>
<21> if 'nested_absmax' in qs_dict:
<22> offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
<23> state2 = cls(
<24> absmax=qs_dict['nested_absmax'].to(device),
<25> blocksize=qs_dict['nested_blocksize'],
<26> code=qs_dict['nested_quant_map'].to(device),
<27> dtype=getattr(torch, qs_dict['nested_dtype']),
<28> )
<29> else:
<30> offset</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState':
# offset: 1
quant_state = cls(
quant_type=qs_dict['quant_type'],
absmax=qs_dict['absmax'].to(device),
blocksize=qs_dict['blocksize'],
code=qs_dict['quant_map'].to(device),
dtype=getattr(torch, qs_dict['dtype']),
shape=torch.Size(qs_dict['shape']),
offset=offset,
state2=state2,
)
return quant_state
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"bitsandbytes__{x}" for x in valid_quant_types]
valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state', 'quant_type',
'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
at: bitsandbytes.utils
unpack_tensor_to_dict(tensor_data)
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
Size()
at: torch._C._VariableFunctions
tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
===========changed ref 0===========
# module: bitsandbytes.functional
class QuantState:
"""container for quantization state components to work with Params4bit and similar clases"""
valid_quant_types = ('fp4', 'nf4')
+ valid_qs_type_keys = [f"bitsandbytes__{x}" for x in valid_quant_types]
- valid_qs_type_keys = [f"quant_state.bitsandbytes__{x}" for x in valid_quant_types]
+ valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state', 'quant_type',
- valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state',
+ 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
- 'quant_type', 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ @classmethod
+ def from_prequantized(cls, data, quantized_stats, requires_grad=False, device='cuda', **kwargs):
+ self = torch.Tensor._make_subclass(cls, data.to(device))
+ self.requires_grad = requires_grad
+ self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
+ self.blocksize = self.quant_state.blocksize
+ self.compress_statistics = self.quant_state.nested
+ self.quant_type = self.quant_state.quant_type
+ return self
+
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
- def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
- missing_keys, unexpected_keys, error_msgs):
- # Note: super()._load_from_state_dict() is not called here intentionally.
- if self.bias is not None:
- bias_data = state_dict.pop(prefix + "bias", None)
- self.bias.data = bias_data.to(self.bias.data.device)
-
- self.weight, state_dict = bnb.nn.Params4bit.from_state_dict(
- state_dict, prefix=prefix + "weight" + ".", requires_grad=False
- )
- unexpected_keys.extend(state_dict.keys())
-
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
- @classmethod
- def from_state_dict(cls, state_dict, prefix="", requires_grad=False):
- data = state_dict.pop(prefix.rstrip('.'))
-
- # extracting components for QuantState from state_dict
- qs_dict = {}
- for k, v in state_dict.items():
- if k.replace(prefix, '').split('.')[0] in QuantState.valid_qs_keys:
- qs_dict[k] = v
- state_dict = {k: v for k, v in state_dict.items() if k not in qs_dict}
- qs_dict = {k.replace(prefix, ''): v for k, v in qs_dict.items()}
-
- if data.device.type != "cuda":
- raise ValueError(f"`data.device.type` must be 'cuda', detected {data.device.type}")
-
- cls.requires_grad = requires_grad
- cls.quant_state = QuantState.from_dict(qs_dict=qs_dict, device=data.device)
- cls.blocksize = cls.quant_state.blocksize # this attribute can be deprecated - it duplicates same one in quant_state
- cls.compress_statistics = cls.quant_state.nested # this attribute can be deprecated - it duplicates quant_state.nested
- cls.quant_type = cls.quant_state.quant_type # this attribute can be deprecated - it duplicates same one in quant_state
-
- self = torch.Tensor._make_subclass(cls, data=data.to(data.device))
- return self, state_dict
-
|
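The partial revert above lets `from_dict` accept keys that still carry module prefixes (as they arrive straight from a model `state_dict`), detects the packed item by the "quant_state" substring plus a valid suffix, and strips prefixes afterwards; `dict.update` replaces the `|=` operator that requires Python 3.9+. A small sketch of the key handling, with hypothetical key names:

```python
import torch

valid_qs_type_keys = ["bitsandbytes__fp4", "bitsandbytes__nf4"]

# hypothetical keys as they might appear in a prefixed state_dict
qs_dict = {
    "transformer.h.0.attn.weight.absmax": torch.zeros(4),
    "transformer.h.0.attn.weight.quant_state.bitsandbytes__nf4": torch.zeros(8, dtype=torch.uint8),
}

# detection only needs the substring "quant_state" plus a valid suffix
qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
assert len(qs_key) == 1 and qs_key[0].split(".")[-1] in valid_qs_type_keys

# in from_dict the packed tensor is popped and unpacked first; afterwards prefixes are stripped
stripped = {k.split(".")[-1]: v for k, v in qs_dict.items()}
assert set(stripped) == {"absmax", "bitsandbytes__nf4"}
```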
bitsandbytes.functional/QuantState.to
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
781fcd5b610efcc090544fdf61f69b7889e4a09d
|
partially reverted 76b40a5
|
<2>:<add> self.offset = self.offset.to(device)
|
# module: bitsandbytes.functional
class QuantState:
def to(self, device):
<0> # make sure the quantization state is on the right device
<1> self.absmax = self.absmax.to(device)
<2> if self.nested:
<3> self.offset = self.offset.to(device)
<4> self.state2.absmax = self.state2.absmax.to(device)
<5> self.state2.code = self.state2.code.to(device)
<6>
|
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState.__init__
self.absmax = absmax
self.quant_type = quant_type
self.offset = offset
at: bitsandbytes.functional.QuantState.as_dict
non_tensor_dict = {k: v for k, v in qs_dict.items() if not isinstance(v, torch.Tensor)}
at: bitsandbytes.utils
pack_dict_to_tensor(source_dict)
===========changed ref 0===========
# module: bitsandbytes.functional
class QuantState:
"""container for quantization state components to work with Params4bit and similar clases"""
valid_quant_types = ('fp4', 'nf4')
+ valid_qs_type_keys = [f"bitsandbytes__{x}" for x in valid_quant_types]
- valid_qs_type_keys = [f"quant_state.bitsandbytes__{x}" for x in valid_quant_types]
+ valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state', 'quant_type',
- valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state',
+ 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
- 'quant_type', 'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
===========changed ref 1===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState':
"""
unpacks components of state_dict into QuantState
where necessary, convert into strings, torch.dtype, ints, etc.
qs_dict: based on state_dict, with only relevant keys, striped of prefixes.
item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.
"""
# unpacking tensor with non-tensor components
+ qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
- qs_key = [k for k, v in qs_dict.items() if k in cls.valid_qs_type_keys and isinstance(v, torch.Tensor)]
if not len(qs_key) and 'quant_type' not in qs_dict:
+ raise ValueError("Expected packed or unpacked quant_state items, found neither")
- raise ValueError("Expected packed or unpacked quant_state items, found neither")
+ elif len(qs_key) != 1 or qs_key[0].split(".")[-1] not in cls.valid_qs_type_keys:
- elif len(qs_key) != 1:
+ raise ValueError(f"There should be exactly one `quant_state` item with ending from {cls.valid_qs_type_keys}.\nDetected {qs_key}.")
- raise ValueError(f"There should be exaclly one quant_state item with key from {cls.valid_qs_type_keys}. Detected {len(qs_key)} such items")
# unpacking minor and non-tensor quant state items if necessary
if len(qs_key) == 1:
qs_key = qs_key[0]
+ qs_dict.update(unpack_tensor_to_dict(qs_dict.pop(qs_key)))
- qs_dict |= unpack_tensor_to</s>
===========changed ref 2===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState':
# offset: 1
<s>unpack_tensor_to_dict(qs_dict.pop(qs_key)))
- qs_dict |= unpack_tensor_to_dict(qs_dict.pop(qs_key))
+
+ qs_dict = {k.split('.')[-1]: v for k, v in qs_dict.items()} # strip prefixes
+ assert set(qs_dict.keys()).issubset(cls.valid_qs_keys)
if 'nested_absmax' in qs_dict:
offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
state2 = cls(
absmax=qs_dict['nested_absmax'].to(device),
blocksize=qs_dict['nested_blocksize'],
code=qs_dict['nested_quant_map'].to(device),
dtype=getattr(torch, qs_dict['nested_dtype']),
)
else:
offset, state2 = None, None
quant_state = cls(
quant_type=qs_dict['quant_type'],
absmax=qs_dict['absmax'].to(device),
blocksize=qs_dict['blocksize'],
code=qs_dict['quant_map'].to(device),
dtype=getattr(torch, qs_dict['dtype']),
shape=torch.Size(qs_dict['shape']),
offset=offset,
state2=state2,
)
return quant_state
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ @classmethod
+ def from_prequantized(cls, data, quantized_stats, requires_grad=False, device='cuda', **kwargs):
+ self = torch.Tensor._make_subclass(cls, data.to(device))
+ self.requires_grad = requires_grad
+ self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
+ self.blocksize = self.quant_state.blocksize
+ self.compress_statistics = self.quant_state.nested
+ self.quant_type = self.quant_state.quant_type
+ return self
+
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
- def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
- missing_keys, unexpected_keys, error_msgs):
- # Note: super()._load_from_state_dict() is not called here intentionally.
- if self.bias is not None:
- bias_data = state_dict.pop(prefix + "bias", None)
- self.bias.data = bias_data.to(self.bias.data.device)
-
- self.weight, state_dict = bnb.nn.Params4bit.from_state_dict(
- state_dict, prefix=prefix + "weight" + ".", requires_grad=False
- )
- unexpected_keys.extend(state_dict.keys())
-
|
tests.test_linear4bit/test_linear_serialization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
781fcd5b610efcc090544fdf61f69b7889e4a09d
|
partially reverted 76b40a5
|
<24>:<add> # restoring from state_dict:
<add> bias_data2 = sd.pop("bias", None)
<add> weight_data2 = sd.pop("weight")
<add> weight2 = bnb.nn.Params4bit.from_prequantized(quantized_stats=sd, data=weight_data2)
<del>
<36>:<add> linear_q2.weight = weight2.to(device)
<add> if bias:
<add> linear_q2.bias = torch.nn.Parameter(bias_data2)
<del> linear_q2.load_state_dict(sd)
|
# module: tests.test_linear4bit
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize(
"quant_type, compress_statistics, bias",
list(product(["nf4", "fp4"], [False, True], [False, True])),
)
def test_linear_serialization(quant_type, compress_statistics, bias):
<0> original_dtype = torch.float16
<1> compute_dtype = None
<2> device = "cuda"
<3> layer_shape = (300, 400)
<4>
<5> linear = torch.nn.Linear(*layer_shape, dtype=original_dtype) # original layer
<6>
<7> # Quantizing original layer
<8> linear_q = bnb.nn.Linear4bit(
<9> linear.in_features,
<10> linear.out_features,
<11> bias=bias,
<12> compute_dtype=compute_dtype,
<13> compress_statistics=compress_statistics,
<14> quant_type=quant_type,
<15> device=device,
<16> )
<17> new_weight = bnb.nn.Params4bit(data=linear.weight, requires_grad=False)
<18> linear_q.weight = new_weight.to(device)
<19> if bias:
<20> linear_q.bias.data = linear.bias.data.to(device)
<21>
<22> # saving to state_dict:
<23> sd = linear_q.state_dict()
<24>
<25> # creating new layer with same params:
<26> linear_q2 = bnb.nn.Linear4bit(
<27> linear.in_features,
<28> linear.out_features,
<29> bias=bias,
<30> compute_dtype=compute_dtype,
<31> compress_statistics=compress_statistics,
<32> quant_type=quant_type,
<33> device=device, # TODO create on meta device to save loading time
<34> )
<35> # loading weights from state_dict:
<36> linear_q2.load_state_dict(sd)
<37>
</s>
|
===========below chunk 0===========
# module: tests.test_linear4bit
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize(
"quant_type, compress_statistics, bias",
list(product(["nf4", "fp4"], [False, True], [False, True])),
)
def test_linear_serialization(quant_type, compress_statistics, bias):
# offset: 1
a, b = linear_q.weight, linear_q2.weight
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
q0 = a.quant_state
q1 = b.quant_state
for attr in ('code', 'dtype', 'blocksize', 'absmax'):
c, d = getattr(q0, attr), getattr(q1, attr)
if isinstance(c, torch.Tensor):
assert torch.equal(c, d)
else:
assert c == d, f"{c} != {d}"
if q0.state2 is not None:
for attr in ('code', 'dtype', 'blocksize', 'absmax'):
c, d = getattr(q0.state2, attr), getattr(q1.state2, attr)
if isinstance(c, torch.Tensor):
assert torch.equal(c, d)
else:
assert c == d, f"{c} != {d}"
if bias:
a, b = linear_q.bias, linear_q2.bias
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
# Forward test
x = torch.rand(42, layer_shape[0], device=device)
a = linear_q(x)
b = linear_q2(x)
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
# Saved size ratio test.</s>
===========below chunk 1===========
# module: tests.test_linear4bit
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize(
"quant_type, compress_statistics, bias",
list(product(["nf4", "fp4"], [False, True], [False, True])),
)
def test_linear_serialization(quant_type, compress_statistics, bias):
# offset: 2
<s> assert a.dtype == b.dtype
assert torch.equal(a, b)
# Saved size ratio test. Target set for layer_shape == (300, 400) w/ bias
with TemporaryDirectory() as tmpdir:
state_path_4bit = os.path.join(tmpdir, "state_4bit.pth")
state_path = os.path.join(tmpdir, "state.pth")
torch.save(linear.state_dict(), state_path)
torch.save(linear_q.state_dict(), state_path_4bit)
size_orig, size_4 = os.path.getsize(state_path), os.path.getsize(
state_path_4bit
)
size_ratio = size_4 / size_orig
target_compression = 0.143 if original_dtype == torch.float32 else 0.285
ratio_error_msg = f"quantized_size {size_4:,} is larger on disk than {target_compression:.2%} of original size {size_orig:,}"
assert size_ratio < target_compression, ratio_error_msg
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState.__init__
self.state2 = state2
at: bitsandbytes.nn.modules
Params4bit(data: Tensor=..., requires_grad: builtins.bool=...)
Linear4bit(input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None)
at: bitsandbytes.nn.modules.Linear4bit.__init__
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
at: bitsandbytes.nn.modules.Linear4bit._load_from_state_dict
self.weight, state_dict = bnb.nn.Params4bit.from_state_dict(
state_dict, prefix=prefix + "weight" + ".", requires_grad=False
)
at: bitsandbytes.nn.modules.Params4bit
to(device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=...) -> T
to(tensor: Tensor, non_blocking: bool=...) -> T
to(dtype: Union[dtype, str], non_blocking: bool=...) -> T
at: bitsandbytes.nn.modules.Params4bit.cuda
self.quant_state = quant_state
at: bitsandbytes.nn.modules.Params4bit.from_state_dict
cls.quant_state = QuantState.from_dict(qs_dict=qs_dict, device=data.device)
===========unchanged ref 1===========
at: itertools
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5], iter6: Iterable[_T6]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5, _T6]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3]) -> Iterator[Tuple[_T1, _T2, _T3]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4]) -> Iterator[Tuple[_T1, _T2, _T3, _T4]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2]) -> Iterator[Tuple[_T1, _T2]]
product(*iterables: Iterable[_T1], repeat: int) -> Iterator[Tuple[_T1, ...]]
product(iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]]
product(*iterables: Iterable[Any], repeat: int=...) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[Any], iter2: Iterable[Any], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], iter7: Iterable[Any], *iterables: Iterable[Any]) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5]]
at: os.path
join(a: StrPath, *paths: StrPath) -> str
join(a: BytesPath, *paths: BytesPath) -> bytes
getsize(filename: AnyPath) -> int
===========unchanged ref 2===========
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: torch._C
float16: dtype = ...
at: torch._C._VariableFunctions
equal(input: Tensor, other: Tensor) -> _bool
|
bitsandbytes.functional/QuantState.to
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
74c00eb1c42d889066d5c7adaad984ac7ade0d82
|
remove moving offset if not nested
|
<2>:<del> self.offset = self.offset.to(device)
|
# module: bitsandbytes.functional
class QuantState:
def to(self, device):
<0> # make sure the quantization state is on the right device
<1> self.absmax = self.absmax.to(device)
<2> self.offset = self.offset.to(device)
<3> if self.nested:
<4> self.offset = self.offset.to(device)
<5> self.state2.absmax = self.state2.absmax.to(device)
<6> self.state2.code = self.state2.code.to(device)
<7>
|
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"bitsandbytes__{x}" for x in valid_quant_types]
valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state', 'quant_type',
'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
at: bitsandbytes.functional.QuantState.__init__
self.absmax = absmax
self.offset = offset
self.state2 = state2
self.nested = state2 is not None
|
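Context for the one-line removal above: `offset` only exists for nested (double-quantized) states; when statistics are not compressed it stays `None`, so moving it unconditionally would fail. A tiny reduction with a hypothetical stand-in class:

```python
import torch

class TinyState:
    """Hypothetical stand-in for QuantState, only to show why the guard matters."""
    def __init__(self, nested):
        self.absmax = torch.zeros(4)
        self.offset = torch.tensor(0.03) if nested else None   # offset only exists when nested
        self.nested = nested

    def to(self, device):
        self.absmax = self.absmax.to(device)
        if self.nested:   # without this guard, None.to(device) raises AttributeError
            self.offset = self.offset.to(device)

TinyState(nested=False).to("cpu")   # fine with the guard; would crash with an unconditional move
TinyState(nested=True).to("cpu")
```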
bitsandbytes.functional/QuantState.as_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
ffd46ce1823010916b4c1edc565ef14961b637d7
|
fixes for init and tests
|
<10>:<add> 'shape': tuple(self.shape),
<del> 'shape': tuple(self.shape) if self.nested else None,
|
# module: bitsandbytes.functional
class QuantState:
def as_dict(self, packed=False):
<0> """
<1> returns dict of tensors and strings to use in serialization via _save_to_state_dict()
<2> param: packed -- returns dict[str, torch.Tensor] for state_dict
<3> """
<4> qs_dict = {
<5> 'quant_type': self.quant_type,
<6> 'absmax': self.absmax,
<7> 'blocksize': self.blocksize,
<8> 'quant_map': self.code,
<9> 'dtype': str(self.dtype).strip('torch.'),
<10> 'shape': tuple(self.shape) if self.nested else None,
<11> }
<12> if self.nested:
<13> qs_dict.update({
<14> 'nested_absmax': self.state2.absmax,
<15> 'nested_blocksize': self.state2.blocksize,
<16> 'nested_quant_map': self.state2.code,
<17> 'nested_dtype': str(self.state2.dtype).strip('torch.'),
<18> 'nested_offset': self.offset.item(),
<19> })
<20> if not packed:
<21> return qs_dict
<22>
<23> qs_packed_dict = {k: v for k, v in qs_dict.items() if isinstance(v, torch.Tensor)}
<24> non_tensor_dict = {k: v for k, v in qs_dict.items() if not isinstance(v, torch.Tensor)}
<25> qs_packed_dict["quant_state." + "bitsandbytes__" + self.quant_type] = pack_dict_to_tensor(non_tensor_dict)
<26> return qs_packed_dict
<27>
|
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"bitsandbytes__{x}" for x in valid_quant_types]
valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state', 'quant_type',
'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
at: bitsandbytes.functional.QuantState.__init__
self.absmax = absmax
self.shape = shape
self.code = code
self.dtype = dtype
self.blocksize = blocksize
self.quant_type = quant_type
self.offset = offset
self.state2 = state2
self.nested = state2 is not None
at: bitsandbytes.functional.QuantState.to
self.absmax = self.absmax.to(device)
self.offset = self.offset.to(device)
at: bitsandbytes.utils
pack_dict_to_tensor(source_dict)
|
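Why `shape` is now serialized unconditionally: `from_dict` rebuilds it with `torch.Size(qs_dict['shape'])`, which cannot take `None`, so the old `... if self.nested else None` broke loading of non-nested states. A quick check:

```python
import torch

assert torch.Size((300, 400)) == (300, 400)   # round-trips fine from a tuple

try:
    torch.Size(None)                           # what the old non-nested path handed over
except TypeError as e:
    print("fails as expected:", e)
```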
tests.test_linear4bit/test_linear_serialization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
ffd46ce1823010916b4c1edc565ef14961b637d7
|
fixes for init and tests
|
<5>:<add> linear = torch.nn.Linear(*layer_shape, dtype=original_dtype, device="cpu") # original layer
<del> linear = torch.nn.Linear(*layer_shape, dtype=original_dtype) # original layer
<15>:<add> device="meta", # TODO: consider both CPU, meta and CUDA creation
<del> device=device,
<18>:<add> linear_q.weight = new_weight
<del> linear_q.weight = new_weight.to(device)
<20>:<add> linear_q.bias = torch.nn.Parameter(linear.bias)
<add> linear_q = linear_q.to(device)
<del> linear_q.bias.data = linear.bias.data.to(device)
<24>:<add>
<28>:<add>
|
# module: tests.test_linear4bit
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize(
"quant_type, compress_statistics, bias",
list(product(["nf4", "fp4"], [False, True], [False, True])),
)
def test_linear_serialization(quant_type, compress_statistics, bias):
<0> original_dtype = torch.float16
<1> compute_dtype = None
<2> device = "cuda"
<3> layer_shape = (300, 400)
<4>
<5> linear = torch.nn.Linear(*layer_shape, dtype=original_dtype) # original layer
<6>
<7> # Quantizing original layer
<8> linear_q = bnb.nn.Linear4bit(
<9> linear.in_features,
<10> linear.out_features,
<11> bias=bias,
<12> compute_dtype=compute_dtype,
<13> compress_statistics=compress_statistics,
<14> quant_type=quant_type,
<15> device=device,
<16> )
<17> new_weight = bnb.nn.Params4bit(data=linear.weight, requires_grad=False)
<18> linear_q.weight = new_weight.to(device)
<19> if bias:
<20> linear_q.bias.data = linear.bias.data.to(device)
<21>
<22> # saving to state_dict:
<23> sd = linear_q.state_dict()
<24> # restoring from state_dict:
<25> bias_data2 = sd.pop("bias", None)
<26> weight_data2 = sd.pop("weight")
<27> weight2 = bnb.nn.Params4bit.from_prequantized(quantized_stats=sd, data=weight_data2)
<28> # creating new layer with same params:
<29> linear_q2 = bnb.nn.Linear4bit(
<30> linear.in_features,
<31> linear.out_features,
<32> bias=bias,
<33> compute_dtype=compute_dtype,
</s>
|
===========below chunk 0===========
# module: tests.test_linear4bit
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize(
"quant_type, compress_statistics, bias",
list(product(["nf4", "fp4"], [False, True], [False, True])),
)
def test_linear_serialization(quant_type, compress_statistics, bias):
# offset: 1
quant_type=quant_type,
device=device, # TODO create on meta device to save loading time
)
# loading weights from state_dict:
linear_q2.weight = weight2.to(device)
if bias:
linear_q2.bias = torch.nn.Parameter(bias_data2)
# MATCHING
a, b = linear_q.weight, linear_q2.weight
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
q0 = a.quant_state
q1 = b.quant_state
for attr in ('code', 'dtype', 'blocksize', 'absmax'):
c, d = getattr(q0, attr), getattr(q1, attr)
if isinstance(c, torch.Tensor):
assert torch.equal(c, d)
else:
assert c == d, f"{c} != {d}"
if q0.state2 is not None:
for attr in ('code', 'dtype', 'blocksize', 'absmax'):
c, d = getattr(q0.state2, attr), getattr(q1.state2, attr)
if isinstance(c, torch.Tensor):
assert torch.equal(c, d)
else:
assert c == d, f"{c} != {d}"
if bias:
a, b = linear_q.bias, linear_q2.bias
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
</s>
===========below chunk 1===========
# module: tests.test_linear4bit
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize(
"quant_type, compress_statistics, bias",
list(product(["nf4", "fp4"], [False, True], [False, True])),
)
def test_linear_serialization(quant_type, compress_statistics, bias):
# offset: 2
<s> a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
# Forward test
x = torch.rand(42, layer_shape[0], device=device)
a = linear_q(x)
b = linear_q2(x)
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
# Saved size ratio test. Target set for layer_shape == (300, 400) w/ bias
with TemporaryDirectory() as tmpdir:
state_path_4bit = os.path.join(tmpdir, "state_4bit.pth")
state_path = os.path.join(tmpdir, "state.pth")
torch.save(linear.state_dict(), state_path)
torch.save(linear_q.state_dict(), state_path_4bit)
size_orig, size_4 = os.path.getsize(state_path), os.path.getsize(
state_path_4bit
)
size_ratio = size_4 / size_orig
target_compression = 0.143 if original_dtype == torch.float32 else 0.285
ratio_error_msg = f"quantized_size {size_4:,} is larger on disk than {target_compression:.2%} of original size {size_orig:,}"
assert size_ratio < target_compression, ratio_error_msg
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.nn.modules
Params4bit(data: Tensor=..., requires_grad: builtins.bool=...)
Linear4bit(input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None)
at: bitsandbytes.nn.modules.Linear4bit.__init__
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
at: bitsandbytes.nn.modules.Params4bit
from_prequantized(data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool=False, device='cuda', **kwargs) -> "Params4bit"
at: bitsandbytes.nn.modules.Params4bit.cuda
self.quant_state = quant_state
===========unchanged ref 1===========
at: itertools
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5], iter6: Iterable[_T6]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5, _T6]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3]) -> Iterator[Tuple[_T1, _T2, _T3]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4]) -> Iterator[Tuple[_T1, _T2, _T3, _T4]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2]) -> Iterator[Tuple[_T1, _T2]]
product(*iterables: Iterable[_T1], repeat: int) -> Iterator[Tuple[_T1, ...]]
product(iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]]
product(*iterables: Iterable[Any], repeat: int=...) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[Any], iter2: Iterable[Any], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], iter7: Iterable[Any], *iterables: Iterable[Any]) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5]]
at: os.path
join(a: StrPath, *paths: StrPath) -> str
join(a: BytesPath, *paths: BytesPath) -> bytes
getsize(filename: AnyPath) -> int
===========unchanged ref 2===========
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: torch._C
float16: dtype = ...
at: torch._C._VariableFunctions
equal(input: Tensor, other: Tensor) -> _bool
|
bitsandbytes.functional/QuantState.as_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
de44be1a1b48840dec863f5be11a259c0c7cb0bf
|
unshared nested_quant_map
|
<2>:<add> param: packed -- returns dict[str, torch.Tensor] for state_dict fit for safetensors saving
<del> param: packed -- returns dict[str, torch.Tensor] for state_dict
<16>:<add> 'nested_quant_map': self.state2.code.clone(), # un-shared to avoid restoring it after shared tensors are removed by safetensors
<del> 'nested_quant_map': self.state2.code,
<23>:<add> # packed format allows serialization of non-tensor components, critical for saving in safetensors format
|
# module: bitsandbytes.functional
class QuantState:
def as_dict(self, packed=False):
<0> """
<1> returns dict of tensors and strings to use in serialization via _save_to_state_dict()
<2> param: packed -- returns dict[str, torch.Tensor] for state_dict
<3> """
<4> qs_dict = {
<5> 'quant_type': self.quant_type,
<6> 'absmax': self.absmax,
<7> 'blocksize': self.blocksize,
<8> 'quant_map': self.code,
<9> 'dtype': str(self.dtype).strip('torch.'),
<10> 'shape': tuple(self.shape),
<11> }
<12> if self.nested:
<13> qs_dict.update({
<14> 'nested_absmax': self.state2.absmax,
<15> 'nested_blocksize': self.state2.blocksize,
<16> 'nested_quant_map': self.state2.code,
<17> 'nested_dtype': str(self.state2.dtype).strip('torch.'),
<18> 'nested_offset': self.offset.item(),
<19> })
<20> if not packed:
<21> return qs_dict
<22>
<23> qs_packed_dict = {k: v for k, v in qs_dict.items() if isinstance(v, torch.Tensor)}
<24> non_tensor_dict = {k: v for k, v in qs_dict.items() if not isinstance(v, torch.Tensor)}
<25> qs_packed_dict["quant_state." + "bitsandbytes__" + self.quant_type] = pack_dict_to_tensor(non_tensor_dict)
<26> return qs_packed_dict
<27>
|
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"bitsandbytes__{x}" for x in valid_quant_types]
valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state', 'quant_type',
'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
at: bitsandbytes.functional.QuantState.__init__
self.absmax = absmax
self.shape = shape
self.code = code
self.dtype = dtype
self.blocksize = blocksize
self.quant_type = quant_type
self.offset = offset
self.state2 = state2
self.nested = state2 is not None
at: bitsandbytes.functional.QuantState.to
self.absmax = self.absmax.to(device)
self.offset = self.offset.to(device)
at: bitsandbytes.utils
pack_dict_to_tensor(source_dict)
|
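The `.clone()` added here exists because `state2.code` can share storage with the outer `code` tensor, and safetensors refuses aliased tensors on save, so the nested copy is detached into its own storage before packing. A hedged sketch of the failure mode, assuming a recent `safetensors` install (which raises a RuntimeError for shared storage):

```python
import os
import tempfile

import torch
from safetensors.torch import save_file

code = torch.linspace(-1.0, 1.0, 16)
tensors = {"quant_map": code, "nested_quant_map": code}          # same storage referenced twice

tmpdir = tempfile.mkdtemp()
try:
    save_file(tensors, os.path.join(tmpdir, "shared.safetensors"))
except RuntimeError as e:                                        # aliased storage is rejected
    print("rejected:", e)

tensors["nested_quant_map"] = code.clone()                       # what as_dict now does
save_file(tensors, os.path.join(tmpdir, "cloned.safetensors"))   # saves cleanly
```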
bitsandbytes.functional/QuantState.from_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
079d7afe3468a9f9ad0c8214d5c5055bdedaccbf
|
extra check for qs_dict['shape'] is None
|
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState':
<0> """
<1> unpacks components of state_dict into QuantState
<2> where necessary, convert into strings, torch.dtype, ints, etc.
<3>
<4> qs_dict: based on state_dict, with only relevant keys, striped of prefixes.
<5>
<6> item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.
<7> """
<8>
<9> # unpacking tensor with non-tensor components
<10> qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
<11> if not len(qs_key) and 'quant_type' not in qs_dict:
<12> raise ValueError("Expected packed or unpacked quant_state items, found neither")
<13> elif len(qs_key) != 1 or qs_key[0].split(".")[-1] not in cls.valid_qs_type_keys:
<14> raise ValueError(f"There should be exactly one `quant_state` item with ending from {cls.valid_qs_type_keys}.\nDetected {qs_key}.")
<15>
<16> # unpacking minor and non-tensor quant state items if necessary
<17> if len(qs_key) == 1:
<18> qs_key = qs_key[0]
<19> qs_dict.update(unpack_tensor_to_dict(qs_dict.pop(qs_key)))
<20>
<21> qs_dict = {k.split('.')[-1]: v for k, v in qs_dict.items()} # strip prefixes
<22> assert set(qs_dict.keys()).issubset(cls.valid_qs_keys)
<23>
<24> if 'nested_absmax' in qs_dict:
<25> offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
<26> state2 = cls(
<27> absmax=</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState':
# offset: 1
blocksize=qs_dict['nested_blocksize'],
code=qs_dict['nested_quant_map'].to(device),
dtype=getattr(torch, qs_dict['nested_dtype']),
)
else:
offset, state2 = None, None
quant_state = cls(
quant_type=qs_dict['quant_type'],
absmax=qs_dict['absmax'].to(device),
blocksize=qs_dict['blocksize'],
code=qs_dict['quant_map'].to(device),
dtype=getattr(torch, qs_dict['dtype']),
shape=torch.Size(qs_dict['shape']),
offset=offset,
state2=state2,
)
return quant_state
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"bitsandbytes__{x}" for x in valid_quant_types]
valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state', 'quant_type',
'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
at: bitsandbytes.utils
unpack_tensor_to_dict(tensor_data)
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
Size()
at: torch._C._VariableFunctions
tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
|
|
bitsandbytes.cuda_setup.env_vars/to_be_ignored
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
c8f564d51c090a14e34b486892efc9013a285cf2
|
Merge pull request #868 from poedator/fix_1108
|
<5>:<add> "GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
|
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
<0> ignorable = {
<1> "PWD", # PWD: this is how the shell keeps track of the current working dir
<2> "OLDPWD",
<3> "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
<4> "SSH_TTY",
<5> "HOME", # Linux shell default
<6> "TMUX", # Terminal Multiplexer
<7> "XDG_DATA_DIRS", # XDG: Desktop environment stuff
<8> "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
<9> "XDG_RUNTIME_DIR",
<10> "MAIL", # something related to emails
<11> "SHELL", # binary for currently invoked shell
<12> "DBUS_SESSION_BUS_ADDRESS", # hardware related
<13> "PATH", # this is for finding binaries, not libraries
<14> "LESSOPEN", # related to the `less` command
<15> "LESSCLOSE",
<16> "_", # current Python interpreter
<17> }
<18> return env_var in ignorable
<19>
| |
bitsandbytes.nn.modules/Params4bit.cuda
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
c8f564d51c090a14e34b486892efc9013a285cf2
|
Merge pull request #868 from poedator/fix_1108
|
<0>:<add> if self.quant_state is not None:
<add> return self
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def cuda(self, device):
<0> w = self.data.contiguous().half().cuda(device)
<1> w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
<2> self.data = w_4bit
<3> self.quant_state = quant_state
<4>
<5> return self
<6>
|
===========unchanged ref 0===========
at: bitsandbytes.functional
quantize_4bit(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
ignorable = {
"PWD", # PWD: this is how the shell keeps track of the current working dir
"OLDPWD",
"SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
"SSH_TTY",
+ "GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
"HOME", # Linux shell default
"TMUX", # Terminal Multiplexer
"XDG_DATA_DIRS", # XDG: Desktop environment stuff
"XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
"XDG_RUNTIME_DIR",
"MAIL", # something related to emails
"SHELL", # binary for currently invoked shell
"DBUS_SESSION_BUS_ADDRESS", # hardware related
"PATH", # this is for finding binaries, not libraries
"LESSOPEN", # related to the `less` command
"LESSCLOSE",
"_", # current Python interpreter
}
return env_var in ignorable
|
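The early return added here (and reverted in the next row after failing PEFT integration tests) was meant to make `.cuda()` idempotent: once `quant_state` is set, `data` already holds the packed 4-bit tensor, and quantizing it a second time would corrupt the weight. A hypothetical stand-in showing the intent, not the library's exact flow:

```python
import torch

class FakeParam:
    """Hypothetical stand-in for Params4bit, just to show the guard's intent."""
    def __init__(self, data):
        self.data = data
        self.quant_state = None

    def cuda_like(self):
        if self.quant_state is not None:       # the guard this commit added
            return self                        # already packed 4-bit data: do not re-quantize
        self.data = self.data.to(torch.uint8)  # stand-in for quantize_4bit packing
        self.quant_state = object()
        return self

p = FakeParam(torch.randn(8, dtype=torch.float16))
p.cuda_like()
first = p.data.clone()
p.cuda_like()                                  # second call is now a no-op
assert torch.equal(p.data, first)
```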
bitsandbytes.nn.modules/Params4bit.cuda
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
b601979ad8b7d4bd5981abcbcfa65b7e519df4ce
|
Revert "Merge PR#867 from jph00/patch-2" due to failing PEFT integration tests.
|
<0>:<del> if self.quant_state is not None:
<1>:<del> return self
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def cuda(self, device):
<0> if self.quant_state is not None:
<1> return self
<2> w = self.data.contiguous().half().cuda(device)
<3> w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
<4> self.data = w_4bit
<5> self.quant_state = quant_state
<6>
<7> return self
<8>
|
===========unchanged ref 0===========
at: bitsandbytes.functional
quantize_4bit(A: Tensor, absmax: Tensor=None, out: Tensor=None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor
|
bitsandbytes.cuda_setup.env_vars/to_be_ignored
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e706031150ac26fd4c84cac2976c52be14ead11
|
Merge pull request #905 from LucQueen/outofbounds
|
<17>:<add> "GOOGLE_VM_CONFIG_LOCK_FILE", # Google Cloud stuff, contains root only paths
|
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
<0> ignorable = {
<1> "PWD", # PWD: this is how the shell keeps track of the current working dir
<2> "OLDPWD",
<3> "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
<4> "SSH_TTY",
<5> "GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
<6> "HOME", # Linux shell default
<7> "TMUX", # Terminal Multiplexer
<8> "XDG_DATA_DIRS", # XDG: Desktop environment stuff
<9> "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
<10> "XDG_RUNTIME_DIR",
<11> "MAIL", # something related to emails
<12> "SHELL", # binary for currently invoked shell
<13> "DBUS_SESSION_BUS_ADDRESS", # hardware related
<14> "PATH", # this is for finding binaries, not libraries
<15> "LESSOPEN", # related to the `less` command
<16> "LESSCLOSE",
<17> "_", # current Python interpreter
<18> }
<19> return env_var in ignorable
<20>
| |
bitsandbytes.cuda_setup.main/CUDASetup.generate_instructions
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e706031150ac26fd4c84cac2976c52be14ead11
|
Merge pull request #905 from LucQueen/outofbounds
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
<0> if getattr(self, 'error', False): return
<1> print(self.error)
<2> self.error = True
<3> if not self.cuda_available:
<4> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.')
<5> self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
<6> self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
<7> self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
<8> self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
<9> self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
<10> self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so* and follow with step (2b)')
<11> return
<12>
<13> if self.cudart_path is None:
<14> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
<15> self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
<16> self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CU</s>
===========below chunk 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s>UP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone https://github.com/TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
ignorable = {
"PWD", # PWD: this is how the shell keeps track of the current working dir
"OLDPWD",
"SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
"SSH_TTY",
"GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
"HOME", # Linux shell default
"TMUX", # Terminal Multiplexer
"XDG_DATA_DIRS", # XDG: Desktop environment stuff
"XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
"XDG_RUNTIME_DIR",
"MAIL", # something related to emails
"SHELL", # binary for currently invoked shell
"DBUS_SESSION_BUS_ADDRESS", # hardware related
"PATH", # this is for finding binaries, not libraries
"LESSOPEN", # related to the `less` command
"LESSCLOSE",
+ "GOOGLE_VM_CONFIG_LOCK_FILE", # Google Cloud stuff, contains root only paths
"_", # current Python interpreter
}
return env_var in ignorable
|
|
bitsandbytes.cuda_setup.main/remove_non_existent_dirs
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e706031150ac26fd4c84cac2976c52be14ead11
|
Merge pull request #905 from LucQueen/outofbounds
|
<5>:<add> except PermissionError as pex:
<add> # Handle the PermissionError first as it is a subtype of OSError
<add> # https://docs.python.org/3/library/exceptions.html#exception-hierarchy
<add> pass
<8>:<del> except PermissionError as pex:
<9>:<del> pass
|
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
<0> existent_directories: Set[Path] = set()
<1> for path in candidate_paths:
<2> try:
<3> if path.exists():
<4> existent_directories.add(path)
<5> except OSError as exc:
<6> if exc.errno != errno.ENAMETOOLONG:
<7> raise exc
<8> except PermissionError as pex:
<9> pass
<10>
<11> non_existent_directories: Set[Path] = candidate_paths - existent_directories
<12> if non_existent_directories:
<13> CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
<14> f"be non-existent: {non_existent_directories}", is_warning=False)
<15>
<16> return existent_directories
<17>
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
ignorable = {
"PWD", # PWD: this is how the shell keeps track of the current working dir
"OLDPWD",
"SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
"SSH_TTY",
"GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
"HOME", # Linux shell default
"TMUX", # Terminal Multiplexer
"XDG_DATA_DIRS", # XDG: Desktop environment stuff
"XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
"XDG_RUNTIME_DIR",
"MAIL", # something related to emails
"SHELL", # binary for currently invoked shell
"DBUS_SESSION_BUS_ADDRESS", # hardware related
"PATH", # this is for finding binaries, not libraries
"LESSOPEN", # related to the `less` command
"LESSCLOSE",
+ "GOOGLE_VM_CONFIG_LOCK_FILE", # Google Cloud stuff, contains root only paths
"_", # current Python interpreter
}
return env_var in ignorable
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
if getattr(self, 'error', False): return
print(self.error)
self.error = True
if not self.cuda_available:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so* and follow with step (2b)')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev</s>
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
<s>('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
+ self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://raw.githubusercontent.com/TimDettmers/bitsandbytes/main/cuda_install.sh')
- self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
+
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.</s>
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s>_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone https://github.com/TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
|
bitsandbytes.cuda_setup.main/get_cuda_runtime_lib_paths
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e706031150ac26fd4c84cac2976c52be14ead11
|
Merge pull request #905 from LucQueen/outofbounds
|
<3>:<add> try:
<add> if (path / libname).is_file():
<del> if (path / libname).is_file():
<4>:<add> paths.add(path / libname)
<del> paths.add(path / libname)
<5>:<add> except PermissionError:
<add> pass
|
# module: bitsandbytes.cuda_setup.main
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
<0> paths = set()
<1> for libname in CUDA_RUNTIME_LIBS:
<2> for path in candidate_paths:
<3> if (path / libname).is_file():
<4> paths.add(path / libname)
<5> return paths
<6>
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError as pex:
+ # Handle the PermissionError first as it is a subtype of OSError
+ # https://docs.python.org/3/library/exceptions.html#exception-hierarchy
+ pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
- except PermissionError as pex:
- pass
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
ignorable = {
"PWD", # PWD: this is how the shell keeps track of the current working dir
"OLDPWD",
"SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
"SSH_TTY",
"GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
"HOME", # Linux shell default
"TMUX", # Terminal Multiplexer
"XDG_DATA_DIRS", # XDG: Desktop environment stuff
"XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
"XDG_RUNTIME_DIR",
"MAIL", # something related to emails
"SHELL", # binary for currently invoked shell
"DBUS_SESSION_BUS_ADDRESS", # hardware related
"PATH", # this is for finding binaries, not libraries
"LESSOPEN", # related to the `less` command
"LESSCLOSE",
+ "GOOGLE_VM_CONFIG_LOCK_FILE", # Google Cloud stuff, contains root only paths
"_", # current Python interpreter
}
return env_var in ignorable
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
if getattr(self, 'error', False): return
print(self.error)
self.error = True
if not self.cuda_available:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so* and follow with step (2b)')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev</s>
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
<s>('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
+ self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://raw.githubusercontent.com/TimDettmers/bitsandbytes/main/cuda_install.sh')
- self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
+
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.</s>
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s>_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone https://github.com/TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
|
bitsandbytes.nn.modules/Linear4bit.set_compute_type
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e706031150ac26fd4c84cac2976c52be14ead11
|
Merge pull request #905 from LucQueen/outofbounds
|
<9>:<add> warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
<del> warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference.')
<12>:<add> warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
<del> warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference or training speed.')
|
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def set_compute_type(self, x):
<0> if x.dtype in [torch.float32, torch.bfloat16]:
<1> # the input is in a dtype that is safe to compute in, we switch
<2> # to this type for speed and stability
<3> self.compute_dtype = x.dtype
<4> elif x.dtype == torch.float16:
 <5>         # we take the compute dtype passed into the layer
<6> if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
<7> # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
<8> # warn the user about this
<9> warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference.')
<10> warnings.filterwarnings('ignore', message='.*inference.')
<11> if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
<12> warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference or training speed.')
<13> warnings.filterwarnings('ignore', message='.*inference or training')
<14>
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
paths = set()
for libname in CUDA_RUNTIME_LIBS:
for path in candidate_paths:
+ try:
+ if (path / libname).is_file():
- if (path / libname).is_file():
+ paths.add(path / libname)
- paths.add(path / libname)
+ except PermissionError:
+ pass
return paths
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError as pex:
+ # Handle the PermissionError first as it is a subtype of OSError
+ # https://docs.python.org/3/library/exceptions.html#exception-hierarchy
+ pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
- except PermissionError as pex:
- pass
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
ignorable = {
"PWD", # PWD: this is how the shell keeps track of the current working dir
"OLDPWD",
"SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
"SSH_TTY",
"GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
"HOME", # Linux shell default
"TMUX", # Terminal Multiplexer
"XDG_DATA_DIRS", # XDG: Desktop environment stuff
"XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
"XDG_RUNTIME_DIR",
"MAIL", # something related to emails
"SHELL", # binary for currently invoked shell
"DBUS_SESSION_BUS_ADDRESS", # hardware related
"PATH", # this is for finding binaries, not libraries
"LESSOPEN", # related to the `less` command
"LESSCLOSE",
+ "GOOGLE_VM_CONFIG_LOCK_FILE", # Google Cloud stuff, contains root only paths
"_", # current Python interpreter
}
return env_var in ignorable
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
if getattr(self, 'error', False): return
print(self.error)
self.error = True
if not self.cuda_available:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so* and follow with step (2b)')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev</s>
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
<s>('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
+ self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://raw.githubusercontent.com/TimDettmers/bitsandbytes/main/cuda_install.sh')
- self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
+
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.</s>
|
bitsandbytes.nn.triton_based_modules/_switchback_global.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e706031150ac26fd4c84cac2976c52be14ead11
|
Merge pull request #905 from LucQueen/outofbounds
|
<12>:<add> return int8_matmul_mixed_dequantize(
<del> return int8_matmul_mixed_dequanitze(
|
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global(torch.autograd.Function):
@staticmethod
def forward(ctx, X_3D, W, bias):
<0> # reshape input to [N * L, D]
<1> X = X_3D.view(-1, X_3D.size(-1))
<2>
<3> # rowwise quantize for X, global quantize for W
<4> X_int8, state_X = quantize_rowwise(X)
<5> W_int8, state_W = quantize_global(W)
<6>
<7> # save for backward.
<8> ctx.save_for_backward = X, W
<9>
<10> # matmult, fused dequant and add bias
<11> # call "mixed" because we are mixing rowwise quantized and global quantized
<12> return int8_matmul_mixed_dequanitze(
<13> X_int8, W_int8.t(), state_X, state_W, bias
<14> ).view(*X_3D.size()[:-1], -1)
<15>
|
===========changed ref 0===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and global quantized weight
+ # Its purpose is fused matmul then dequantize
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
paths = set()
for libname in CUDA_RUNTIME_LIBS:
for path in candidate_paths:
+ try:
+ if (path / libname).is_file():
- if (path / libname).is_file():
+ paths.add(path / libname)
- paths.add(path / libname)
+ except PermissionError:
+ pass
return paths
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError as pex:
+ # Handle the PermissionError first as it is a subtype of OSError
+ # https://docs.python.org/3/library/exceptions.html#exception-hierarchy
+ pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
- except PermissionError as pex:
- pass
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 3===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ def get_configs_io_bound():
+ configs = []
+ for num_stages in [2, 3, 4, 5, 6]:
+ for block_m in [16, 32]:
+ for block_k in [32, 64]:
+ for block_n in [32, 64, 128, 256]:
+ num_warps = 2 if block_n <= 64 else 4
+ configs.append(
+ triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1},
+ num_stages=num_stages, num_warps=num_warps))
+ # split_k
+ for split_k in [2, 4, 8, 16]:
+ configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
+ num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
+ return configs
+
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
ignorable = {
"PWD", # PWD: this is how the shell keeps track of the current working dir
"OLDPWD",
"SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
"SSH_TTY",
"GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
"HOME", # Linux shell default
"TMUX", # Terminal Multiplexer
"XDG_DATA_DIRS", # XDG: Desktop environment stuff
"XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
"XDG_RUNTIME_DIR",
"MAIL", # something related to emails
"SHELL", # binary for currently invoked shell
"DBUS_SESSION_BUS_ADDRESS", # hardware related
"PATH", # this is for finding binaries, not libraries
"LESSOPEN", # related to the `less` command
"LESSCLOSE",
+ "GOOGLE_VM_CONFIG_LOCK_FILE", # Google Cloud stuff, contains root only paths
"_", # current Python interpreter
}
return env_var in ignorable
===========changed ref 5===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):
+ device = a.device
+ divfactor = 1. / (127. * 127.)
+ has_bias = 0 if bias is None else 1
+ # handle non-contiguous inputs if necessary
+ if a.stride(0) > 1 and a.stride(1) > 1:
+ a = a.contiguous()
+ if b.stride(0) > 1 and b.stride(1) > 1:
+ b = b.contiguous()
+ # checks constraints
+ assert a.shape[1] == b.shape[0], "incompatible dimensions"
+ M, K = a.shape
+ _, N = b.shape
+ # allocates output
+ c = torch.empty((M, N), device=device, dtype=torch.float16)
+ # accumulator types
+ ACC_TYPE = tl.float32 #if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32
+ # launch int8_matmul_mixed_dequantize kernel
+ grid = lambda META: (triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']), META['SPLIT_K'])
+ _int8_matmul_mixed_dequantize[grid](a, b, c, bias, state_x, state_w, M, N, K, divfactor, has_bias,
+ a.stride(0), a.stride(1),
+ b.stride(0), b.stride(1),
+ c.stride(0), c.stride(1),
+ GROUP_M=8, ACC_TYPE=ACC_TYPE)
+ return c
+
|
bitsandbytes.nn.triton_based_modules/_switchback_global.backward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e706031150ac26fd4c84cac2976c52be14ead11
|
Merge pull request #905 from LucQueen/outofbounds
|
<12>:<add> grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
<del> grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view(
|
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global(torch.autograd.Function):
@staticmethod
def backward(ctx, G_3D):
<0> # reshape input to [N_out * L, D]
<1> G = G_3D.reshape(-1, G_3D.size(-1))
<2>
<3> grad_X = grad_W = grad_bias = None
<4>
<5> X, W = ctx.save_for_backward
<6> if ctx.needs_input_grad[0]:
<7> # rowwise quantize for G, global quantize for W
<8> # for W, we also fuse the transpose operation because only A @ B^T is supported
<9> # so we transpose once then call .t() in the matmul
<10> G_int8, state_G = quantize_rowwise(G)
<11> W_int8, state_W = quantize_global_transpose(W)
<12> grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view(
<13> *G_3D.size()[:-1], -1
<14> )
<15> if ctx.needs_input_grad[1]:
<16> # backward pass uses standard weight grad
<17> grad_W = torch.matmul(G.t(), X.to(G.dtype))
<18> if ctx.needs_input_grad[2]:
<19> grad_bias = G.sum(dim=0)
<20>
<21> return grad_X, grad_W, grad_bias
<22>
|
===========changed ref 0===========
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global(torch.autograd.Function):
@staticmethod
def forward(ctx, X_3D, W, bias):
# reshape input to [N * L, D]
X = X_3D.view(-1, X_3D.size(-1))
# rowwise quantize for X, global quantize for W
X_int8, state_X = quantize_rowwise(X)
W_int8, state_W = quantize_global(W)
# save for backward.
ctx.save_for_backward = X, W
# matmult, fused dequant and add bias
# call "mixed" because we are mixing rowwise quantized and global quantized
+ return int8_matmul_mixed_dequantize(
- return int8_matmul_mixed_dequanitze(
X_int8, W_int8.t(), state_X, state_W, bias
).view(*X_3D.size()[:-1], -1)
===========changed ref 1===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and global quantized weight
+ # Its purpose is fused matmul then dequantize
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
paths = set()
for libname in CUDA_RUNTIME_LIBS:
for path in candidate_paths:
+ try:
+ if (path / libname).is_file():
- if (path / libname).is_file():
+ paths.add(path / libname)
- paths.add(path / libname)
+ except PermissionError:
+ pass
return paths
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError as pex:
+ # Handle the PermissionError first as it is a subtype of OSError
+ # https://docs.python.org/3/library/exceptions.html#exception-hierarchy
+ pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
- except PermissionError as pex:
- pass
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 4===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ def get_configs_io_bound():
+ configs = []
+ for num_stages in [2, 3, 4, 5, 6]:
+ for block_m in [16, 32]:
+ for block_k in [32, 64]:
+ for block_n in [32, 64, 128, 256]:
+ num_warps = 2 if block_n <= 64 else 4
+ configs.append(
+ triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1},
+ num_stages=num_stages, num_warps=num_warps))
+ # split_k
+ for split_k in [2, 4, 8, 16]:
+ configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
+ num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
+ return configs
+
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
ignorable = {
"PWD", # PWD: this is how the shell keeps track of the current working dir
"OLDPWD",
"SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
"SSH_TTY",
"GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
"HOME", # Linux shell default
"TMUX", # Terminal Multiplexer
"XDG_DATA_DIRS", # XDG: Desktop environment stuff
"XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
"XDG_RUNTIME_DIR",
"MAIL", # something related to emails
"SHELL", # binary for currently invoked shell
"DBUS_SESSION_BUS_ADDRESS", # hardware related
"PATH", # this is for finding binaries, not libraries
"LESSOPEN", # related to the `less` command
"LESSCLOSE",
+ "GOOGLE_VM_CONFIG_LOCK_FILE", # Google Cloud stuff, contains root only paths
"_", # current Python interpreter
}
return env_var in ignorable
===========changed ref 6===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):
+ device = a.device
+ divfactor = 1. / (127. * 127.)
+ has_bias = 0 if bias is None else 1
+ # handle non-contiguous inputs if necessary
+ if a.stride(0) > 1 and a.stride(1) > 1:
+ a = a.contiguous()
+ if b.stride(0) > 1 and b.stride(1) > 1:
+ b = b.contiguous()
+ # checks constraints
+ assert a.shape[1] == b.shape[0], "incompatible dimensions"
+ M, K = a.shape
+ _, N = b.shape
+ # allocates output
+ c = torch.empty((M, N), device=device, dtype=torch.float16)
+ # accumulator types
+ ACC_TYPE = tl.float32 #if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32
+ # launch int8_matmul_mixed_dequantize kernel
+ grid = lambda META: (triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']), META['SPLIT_K'])
+ _int8_matmul_mixed_dequantize[grid](a, b, c, bias, state_x, state_w, M, N, K, divfactor, has_bias,
+ a.stride(0), a.stride(1),
+ b.stride(0), b.stride(1),
+ c.stride(0), c.stride(1),
+ GROUP_M=8, ACC_TYPE=ACC_TYPE)
+ return c
+
|
bitsandbytes.nn.triton_based_modules/_switchback_global_mem_efficient.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e706031150ac26fd4c84cac2976c52be14ead11
|
Merge pull request #905 from LucQueen/outofbounds
|
<14>:<add> return int8_matmul_mixed_dequantize(
<del> return int8_matmul_mixed_dequanitze(
|
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global_mem_efficient(torch.autograd.Function):
@staticmethod
def forward(ctx, X_3D, W, bias):
<0> # reshape input to [N * L, D]
<1> X = X_3D.view(-1, X_3D.size(-1))
<2> X_3D_sz = X_3D.size()
<3>
<4> # rowwise quantize for X, global quantize for W
<5> X_int8, state_X = quantize_rowwise(X)
<6> del X
<7> W_int8, state_W = quantize_global(W)
<8>
<9> # save for backward.
<10> ctx.save_for_backward = X_int8, state_X, W_int8, state_W
<11>
<12> # matmult, fused dequant and add bias
<13> # call "mixed" because we are mixing rowwise quantized and global quantized
<14> return int8_matmul_mixed_dequanitze(
<15> X_int8, W_int8.t(), state_X, state_W, bias
<16> ).view(*X_3D_sz[:-1], -1)
<17>
|
===========changed ref 0===========
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global(torch.autograd.Function):
@staticmethod
def forward(ctx, X_3D, W, bias):
# reshape input to [N * L, D]
X = X_3D.view(-1, X_3D.size(-1))
# rowwise quantize for X, global quantize for W
X_int8, state_X = quantize_rowwise(X)
W_int8, state_W = quantize_global(W)
# save for backward.
ctx.save_for_backward = X, W
# matmult, fused dequant and add bias
# call "mixed" because we are mixing rowwise quantized and global quantized
+ return int8_matmul_mixed_dequantize(
- return int8_matmul_mixed_dequanitze(
X_int8, W_int8.t(), state_X, state_W, bias
).view(*X_3D.size()[:-1], -1)
===========changed ref 1===========
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global(torch.autograd.Function):
@staticmethod
def backward(ctx, G_3D):
# reshape input to [N_out * L, D]
G = G_3D.reshape(-1, G_3D.size(-1))
grad_X = grad_W = grad_bias = None
X, W = ctx.save_for_backward
if ctx.needs_input_grad[0]:
# rowwise quantize for G, global quantize for W
# for W, we also fuse the transpose operation because only A @ B^T is supported
# so we transpose once then call .t() in the matmul
G_int8, state_G = quantize_rowwise(G)
W_int8, state_W = quantize_global_transpose(W)
+ grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
- grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view(
*G_3D.size()[:-1], -1
)
if ctx.needs_input_grad[1]:
# backward pass uses standard weight grad
grad_W = torch.matmul(G.t(), X.to(G.dtype))
if ctx.needs_input_grad[2]:
grad_bias = G.sum(dim=0)
return grad_X, grad_W, grad_bias
===========changed ref 2===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and global quantized weight
+ # Its purpose is fused matmul then dequantize
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
paths = set()
for libname in CUDA_RUNTIME_LIBS:
for path in candidate_paths:
+ try:
+ if (path / libname).is_file():
- if (path / libname).is_file():
+ paths.add(path / libname)
- paths.add(path / libname)
+ except PermissionError:
+ pass
return paths
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError as pex:
+ # Handle the PermissionError first as it is a subtype of OSError
+ # https://docs.python.org/3/library/exceptions.html#exception-hierarchy
+ pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
- except PermissionError as pex:
- pass
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 5===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ def get_configs_io_bound():
+ configs = []
+ for num_stages in [2, 3, 4, 5, 6]:
+ for block_m in [16, 32]:
+ for block_k in [32, 64]:
+ for block_n in [32, 64, 128, 256]:
+ num_warps = 2 if block_n <= 64 else 4
+ configs.append(
+ triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1},
+ num_stages=num_stages, num_warps=num_warps))
+ # split_k
+ for split_k in [2, 4, 8, 16]:
+ configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
+ num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
+ return configs
+
===========changed ref 6===========
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
ignorable = {
"PWD", # PWD: this is how the shell keeps track of the current working dir
"OLDPWD",
"SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
"SSH_TTY",
"GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
"HOME", # Linux shell default
"TMUX", # Terminal Multiplexer
"XDG_DATA_DIRS", # XDG: Desktop environment stuff
"XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
"XDG_RUNTIME_DIR",
"MAIL", # something related to emails
"SHELL", # binary for currently invoked shell
"DBUS_SESSION_BUS_ADDRESS", # hardware related
"PATH", # this is for finding binaries, not libraries
"LESSOPEN", # related to the `less` command
"LESSCLOSE",
+ "GOOGLE_VM_CONFIG_LOCK_FILE", # Google Cloud stuff, contains root only paths
"_", # current Python interpreter
}
return env_var in ignorable
|
bitsandbytes.nn.triton_based_modules/_switchback_global_mem_efficient.backward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e706031150ac26fd4c84cac2976c52be14ead11
|
Merge pull request #905 from LucQueen/outofbounds
|
<18>:<add> grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
<del> grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view(
|
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global_mem_efficient(torch.autograd.Function):
@staticmethod
def backward(ctx, G_3D):
<0> # reshape input to [N_out * L, D]
<1> G = G_3D.reshape(-1, G_3D.size(-1))
<2> G_3D_sz = G_3D.size()
<3>
<4> grad_X = grad_W = grad_bias = None
<5>
<6> X_int8, state_X, W_int8, state_W = ctx.save_for_backward
<7> if ctx.needs_input_grad[1]:
<8> real_X = dequantize_rowwise(X_int8, state_X)
<9> del X_int8
<10> grad_W = torch.matmul(G.t(), real_X.to(G.dtype))
<11> del real_X
<12> if ctx.needs_input_grad[2]:
<13> grad_bias = G.sum(dim=0)
<14> if ctx.needs_input_grad[0]:
<15> G_int8, state_G = quantize_rowwise(G)
<16> del G
<17> W_int8 = W_int8.t().contiguous()
<18> grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view(
<19> *G_3D_sz[:-1], -1
<20> )
<21>
<22> return grad_X, grad_W, grad_bias
<23>
|
===========changed ref 0===========
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global_mem_efficient(torch.autograd.Function):
@staticmethod
def forward(ctx, X_3D, W, bias):
# reshape input to [N * L, D]
X = X_3D.view(-1, X_3D.size(-1))
X_3D_sz = X_3D.size()
# rowwise quantize for X, global quantize for W
X_int8, state_X = quantize_rowwise(X)
del X
W_int8, state_W = quantize_global(W)
# save for backward.
ctx.save_for_backward = X_int8, state_X, W_int8, state_W
# matmult, fused dequant and add bias
# call "mixed" because we are mixing rowwise quantized and global quantized
+ return int8_matmul_mixed_dequantize(
- return int8_matmul_mixed_dequanitze(
X_int8, W_int8.t(), state_X, state_W, bias
).view(*X_3D_sz[:-1], -1)
===========changed ref 1===========
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global(torch.autograd.Function):
@staticmethod
def forward(ctx, X_3D, W, bias):
# reshape input to [N * L, D]
X = X_3D.view(-1, X_3D.size(-1))
# rowwise quantize for X, global quantize for W
X_int8, state_X = quantize_rowwise(X)
W_int8, state_W = quantize_global(W)
# save for backward.
ctx.save_for_backward = X, W
# matmult, fused dequant and add bias
# call "mixed" because we are mixing rowwise quantized and global quantized
+ return int8_matmul_mixed_dequantize(
- return int8_matmul_mixed_dequanitze(
X_int8, W_int8.t(), state_X, state_W, bias
).view(*X_3D.size()[:-1], -1)
===========changed ref 2===========
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global(torch.autograd.Function):
@staticmethod
def backward(ctx, G_3D):
# reshape input to [N_out * L, D]
G = G_3D.reshape(-1, G_3D.size(-1))
grad_X = grad_W = grad_bias = None
X, W = ctx.save_for_backward
if ctx.needs_input_grad[0]:
# rowwise quantize for G, global quantize for W
# for W, we also fuse the transpose operation because only A @ B^T is supported
# so we transpose once then call .t() in the matmul
G_int8, state_G = quantize_rowwise(G)
W_int8, state_W = quantize_global_transpose(W)
+ grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
- grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view(
*G_3D.size()[:-1], -1
)
if ctx.needs_input_grad[1]:
# backward pass uses standard weight grad
grad_W = torch.matmul(G.t(), X.to(G.dtype))
if ctx.needs_input_grad[2]:
grad_bias = G.sum(dim=0)
return grad_X, grad_W, grad_bias
===========changed ref 3===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and global quantized weight
+ # Its purpose is fused matmul then dequantize
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
paths = set()
for libname in CUDA_RUNTIME_LIBS:
for path in candidate_paths:
+ try:
+ if (path / libname).is_file():
- if (path / libname).is_file():
+ paths.add(path / libname)
- paths.add(path / libname)
+ except PermissionError:
+ pass
return paths
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError as pex:
+ # Handle the PermissionError first as it is a subtype of OSError
+ # https://docs.python.org/3/library/exceptions.html#exception-hierarchy
+ pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
- except PermissionError as pex:
- pass
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 6===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ def get_configs_io_bound():
+ configs = []
+ for num_stages in [2, 3, 4, 5, 6]:
+ for block_m in [16, 32]:
+ for block_k in [32, 64]:
+ for block_n in [32, 64, 128, 256]:
+ num_warps = 2 if block_n <= 64 else 4
+ configs.append(
+ triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1},
+ num_stages=num_stages, num_warps=num_warps))
+ # split_k
+ for split_k in [2, 4, 8, 16]:
+ configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
+ num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
+ return configs
+
|
bitsandbytes.nn.triton_based_modules/SwitchBackLinear.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e706031150ac26fd4c84cac2976c52be14ead11
|
Merge pull request #905 from LucQueen/outofbounds
|
<16>:<add> return int8_matmul_mixed_dequantize(
<del> return int8_matmul_mixed_dequanitze(
|
# module: bitsandbytes.nn.triton_based_modules
class SwitchBackLinear(nn.Linear):
def forward(self, x):
<0> if self.training:
<1> return self._fn.apply(x, self.weight, self.bias)
<2> else:
<3> # If it hasn't been "prepared for eval", run the standard forward pass.
<4> if not hasattr(self, "W_int8"):
<5> return self._fn.apply(x, self.weight, self.bias)
<6>
<7> # Otherwise, use pre-computed weights.
<8> X = x.view(-1, x.size(-1))
<9> X_int8, state_X = quantize_rowwise(X)
<10>
<11> if self.vector_wise_quantization:
<12> return int8_matmul_rowwise_dequantize(
<13> X_int8, self.W_int8.t(), state_X, self.state_W, self.bias
<14> ).view(*x.size()[:-1], -1)
<15> else:
<16> return int8_matmul_mixed_dequanitze(
<17> X_int8, self.W_int8.t(), state_X, self.state_W, self.bias
<18> ).view(*x.size()[:-1], -1)
<19>
|
===========changed ref 0===========
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global_mem_efficient(torch.autograd.Function):
@staticmethod
def forward(ctx, X_3D, W, bias):
# reshape input to [N * L, D]
X = X_3D.view(-1, X_3D.size(-1))
X_3D_sz = X_3D.size()
# rowwise quantize for X, global quantize for W
X_int8, state_X = quantize_rowwise(X)
del X
W_int8, state_W = quantize_global(W)
# save for backward.
ctx.save_for_backward = X_int8, state_X, W_int8, state_W
# matmult, fused dequant and add bias
# call "mixed" because we are mixing rowwise quantized and global quantized
+ return int8_matmul_mixed_dequantize(
- return int8_matmul_mixed_dequanitze(
X_int8, W_int8.t(), state_X, state_W, bias
).view(*X_3D_sz[:-1], -1)
===========changed ref 1===========
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global(torch.autograd.Function):
@staticmethod
def forward(ctx, X_3D, W, bias):
# reshape input to [N * L, D]
X = X_3D.view(-1, X_3D.size(-1))
# rowwise quantize for X, global quantize for W
X_int8, state_X = quantize_rowwise(X)
W_int8, state_W = quantize_global(W)
# save for backward.
ctx.save_for_backward = X, W
# matmult, fused dequant and add bias
# call "mixed" because we are mixing rowwise quantized and global quantized
+ return int8_matmul_mixed_dequantize(
- return int8_matmul_mixed_dequanitze(
X_int8, W_int8.t(), state_X, state_W, bias
).view(*X_3D.size()[:-1], -1)
===========changed ref 2===========
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global_mem_efficient(torch.autograd.Function):
@staticmethod
def backward(ctx, G_3D):
# reshape input to [N_out * L, D]
G = G_3D.reshape(-1, G_3D.size(-1))
G_3D_sz = G_3D.size()
grad_X = grad_W = grad_bias = None
X_int8, state_X, W_int8, state_W = ctx.save_for_backward
if ctx.needs_input_grad[1]:
real_X = dequantize_rowwise(X_int8, state_X)
del X_int8
grad_W = torch.matmul(G.t(), real_X.to(G.dtype))
del real_X
if ctx.needs_input_grad[2]:
grad_bias = G.sum(dim=0)
if ctx.needs_input_grad[0]:
G_int8, state_G = quantize_rowwise(G)
del G
W_int8 = W_int8.t().contiguous()
+ grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
- grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view(
*G_3D_sz[:-1], -1
)
return grad_X, grad_W, grad_bias
===========changed ref 3===========
# module: bitsandbytes.nn.triton_based_modules
class _switchback_global(torch.autograd.Function):
@staticmethod
def backward(ctx, G_3D):
# reshape input to [N_out * L, D]
G = G_3D.reshape(-1, G_3D.size(-1))
grad_X = grad_W = grad_bias = None
X, W = ctx.save_for_backward
if ctx.needs_input_grad[0]:
# rowwise quantize for G, global quantize for W
# for W, we also fuse the transpose operation because only A @ B^T is supported
# so we transpose once then call .t() in the matmul
G_int8, state_G = quantize_rowwise(G)
W_int8, state_W = quantize_global_transpose(W)
+ grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
- grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view(
*G_3D.size()[:-1], -1
)
if ctx.needs_input_grad[1]:
# backward pass uses standard weight grad
grad_W = torch.matmul(G.t(), X.to(G.dtype))
if ctx.needs_input_grad[2]:
grad_bias = G.sum(dim=0)
return grad_X, grad_W, grad_bias
===========changed ref 4===========
+ # module: bitsandbytes.triton.int8_matmul_mixed_dequantize
+ # This is a matmul kernel based on triton.ops.matmul
+ # It is modified to support rowwise quantized input and global quantized weight
+ # Its purpose is fused matmul then dequantize
+ # It does support bias.
+
+ def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
paths = set()
for libname in CUDA_RUNTIME_LIBS:
for path in candidate_paths:
+ try:
+ if (path / libname).is_file():
- if (path / libname).is_file():
+ paths.add(path / libname)
- paths.add(path / libname)
+ except PermissionError:
+ pass
return paths
===========changed ref 6===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError as pex:
+ # Handle the PermissionError first as it is a subtype of OSError
+ # https://docs.python.org/3/library/exceptions.html#exception-hierarchy
+ pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
- except PermissionError as pex:
- pass
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
|
bitsandbytes.functional/create_normal_map
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4870580f17767a4165ec05d954dff2b5c25b694d
|
Fixed bnb input in setup.py. Bumped version for release.
|
<0>:<add> from scipy.stats import norm
<add>
|
# module: bitsandbytes.functional
def create_normal_map(offset=0.9677083, use_extra_value=True):
<0> if use_extra_value:
<1> # one more positive value, this is an asymmetric type
<2> v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
<3> v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
<4> v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
<5> else:
<6> v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
<7> v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
<8> v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
<9>
<10> v = v1 + v2 + v3
<11>
<12> values = torch.Tensor(v)
<13> values = values.sort().values
<14> values /= values.max()
<15>
<16> assert values.numel() == 256
<17>
<18> return values
<19>
|
===========unchanged ref 0===========
at: torch._C._VariableFunctions
linspace(start: Number, end: Number, steps: Optional[_int]=None, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
linspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
===========changed ref 0===========
# module: bitsandbytes
if COMPILED_WITH_CUDA:
from .optim import adam
__pdoc__ = {
"libbitsandbytes": False,
"optim.optimizer.Optimizer8bit": False,
"optim.optimizer.MockArgs": False,
}
+ __version__ = "0.42.0"
- __version__ = "0.41.3.post1"
PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
|
tests.test_cuda_setup_evaluator/test_manual_override
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4870580f17767a4165ec05d954dff2b5c25b694d
|
Fixed bnb input in setup.py. Bumped version for release.
|
<7>:<add> os.environ['BNB_CUDA_VERSION']='122'
<del> os.environ['CUDA_VERSION']='122'
<8>:<add> #assert str(manual_cuda_path) in os.environ['LD_LIBRARY_PATH']
<del> assert str(manual_cuda_path) in os.environ['LD_LIBRARY_PATH']
<11>:<del> assert loaded_lib == 'libbitsandbytes_cuda122.so'
|
# module: tests.test_cuda_setup_evaluator
# hardcoded test. Not good, but a sanity check for now
+ # TODO: improve this
def test_manual_override():
<0> manual_cuda_path = str(Path('/mmfs1/home/dettmers/data/local/cuda-12.2'))
<1>
<2> pytorch_version = torch.version.cuda.replace('.', '')
<3>
<4> assert pytorch_version != 122
<5>
<6> os.environ['CUDA_HOME']='{manual_cuda_path}'
<7> os.environ['CUDA_VERSION']='122'
<8> assert str(manual_cuda_path) in os.environ['LD_LIBRARY_PATH']
<9> import bitsandbytes as bnb
<10> loaded_lib = bnb.cuda_setup.main.CUDASetup.get_instance().binary_name
<11> assert loaded_lib == 'libbitsandbytes_cuda122.so'
<12>
|
===========unchanged ref 0===========
at: bitsandbytes.cuda_setup.main
CUDASetup()
at: bitsandbytes.cuda_setup.main.CUDASetup
_instance = None
get_instance()
at: os
environ = _createenviron()
at: pathlib
Path()
at: torch.version
cuda = None
===========changed ref 0===========
# module: bitsandbytes
if COMPILED_WITH_CUDA:
from .optim import adam
__pdoc__ = {
"libbitsandbytes": False,
"optim.optimizer.Optimizer8bit": False,
"optim.optimizer.MockArgs": False,
}
+ __version__ = "0.42.0"
- __version__ = "0.41.3.post1"
PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
===========changed ref 1===========
# module: bitsandbytes.functional
def create_normal_map(offset=0.9677083, use_extra_value=True):
+ from scipy.stats import norm
+
if use_extra_value:
# one more positive value, this is an asymmetric type
v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
else:
v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
v = v1 + v2 + v3
values = torch.Tensor(v)
values = values.sort().values
values /= values.max()
assert values.numel() == 256
return values
|
tests.test_functional/test_zeropoint
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4870580f17767a4165ec05d954dff2b5c25b694d
|
Fixed bnb input in setup.py. Bumped version for release.
|
<s> #torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# linear8bit_train(A)
#torch.cuda.synchronize()
#print( f"bnb linear8bitlt with threshold (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
def test_zeropoint():
<0> def quant_zp(x):
<1> dtype = x.dtype
<2> x = x.float()
<3> dyna = x.max() - x.min()
<4> if dyna == 0:
<5> dyna = 1
<6> qx = 254.0 / dyna
<7> minx = x.min()
<8> # zpx = torch.round(minx* qx)
<9> # zpx = 127 - torch.round(x.max()* qx)
<10> zpx = torch.round(x.min() * qx) - 127
<11> x = (qx * x) + zpx
<12> return x, qx, zpx
<13>
<14> batch = 2
<15> seq = 512
<16> model = 1024
<17> hidden = 4 * model
<18> A = torch.randn(batch * seq, model, device="cuda").half() * 0.1
<19> B = torch.randn(model, hidden, device="cuda").half() * 0.1
<20>
<21> C0 = torch.matmul(A, B)
<22>
<23> # A, SA = F.vectorwise_quant(A, quant_type='linear')
<24> # B, SB = F.vectorwise_quant(B, quant_type='linear')
<25> A = A.float()
<26> B = B.float()
<27>
<28> C1 = torch.matmul(A, B)
<29> C3 = bnb.matmul(A.half(), B.t().contiguous().half())
<30>
<31> zp = 1
<32> # C2 =</s>
|
===========below chunk 0===========
<s>cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# linear8bit_train(A)
#torch.cuda.synchronize()
#print( f"bnb linear8bitlt with threshold (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
def test_zeropoint():
# offset: 1
# C2 += B.sum(0).view(1, -1)*zp
C2 = torch.matmul(A, B - zp)
C2 -= A.sum(1).view(-1, 1) * zp
ca, cqa, cza = quant_zp(A)
print(ca.min(), ca.max())
print((ca - cza).min(), (ca - cza).max())
zp = 1
scale = 2.0
C5 = torch.matmul((A * scale) - zp, B)
C5 += B.sum(0) * zp
C5 /= scale
CA, qa, zpa = quant_zp(A)
C4 = torch.matmul(CA, B)
C4 -= B.sum(0) * zpa
C4 /= qa
zpb = 1
zpa = 1
qa = 2
qb = 2
C6 = torch.matmul((A * qa) + zpa, (B * qb) + zpb)
C6 -= (qb * B.sum(0).view(1, -1) * zpa) + (qa * A.sum(1).view(-1, 1) * zpb)
C6 -= zpa * zpb * A.shape[1]
C6 /= qa * qb
CA, qa, zpa = quant_zp(A)
CB, qb, zpb = quant_zp(B)
C7 = torch.matmul(CA, CB)
C7</s>
===========below chunk 1===========
<s>cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# linear8bit_train(A)
#torch.cuda.synchronize()
#print( f"bnb linear8bitlt with threshold (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
def test_zeropoint():
# offset: 2
<s> qb, zpb = quant_zp(B)
C7 = torch.matmul(CA, CB)
C7 -= (qb * B.sum(0).view(1, -1) * zpa) + (qa * A.sum(1).view(-1, 1) * zpb)
C7 -= zpa * zpb * A.shape[1]
C7 /= qa * qb
print("")
# print(C0.flatten()[:10])
print(C1.flatten()[:10])
print(C2.flatten()[:10])
print(C3.flatten()[:10])
print(C5.flatten()[:10])
print(C6.flatten()[:10])
print(C7.flatten()[:10])
err1 = torch.abs(C1 - C2).mean().item()
err2 = torch.abs(C1 - C3).mean().item()
err3 = torch.abs(C1 - C4).mean().item()
err4 = torch.abs(C1 - C5).mean().item()
err5 = torch.abs(C1 - C6).mean().item()
err6 = torch.abs(C1 - C7).mean().item()
print(err1, err2, err3, err4, err5, err6)
===========changed ref 0===========
# module: bitsandbytes
if COMPILED_WITH_CUDA:
from .optim import adam
__pdoc__ = {
"libbitsandbytes": False,
"optim.optimizer.Optimizer8bit": False,
"optim.optimizer.MockArgs": False,
}
+ __version__ = "0.42.0"
- __version__ = "0.41.3.post1"
PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
===========changed ref 1===========
# module: tests.test_cuda_setup_evaluator
# hardcoded test. Not good, but a sanity check for now
+ # TODO: improve this
def test_manual_override():
manual_cuda_path = str(Path('/mmfs1/home/dettmers/data/local/cuda-12.2'))
pytorch_version = torch.version.cuda.replace('.', '')
assert pytorch_version != 122
os.environ['CUDA_HOME']='{manual_cuda_path}'
+ os.environ['BNB_CUDA_VERSION']='122'
- os.environ['CUDA_VERSION']='122'
+ #assert str(manual_cuda_path) in os.environ['LD_LIBRARY_PATH']
- assert str(manual_cuda_path) in os.environ['LD_LIBRARY_PATH']
import bitsandbytes as bnb
loaded_lib = bnb.cuda_setup.main.CUDASetup.get_instance().binary_name
- assert loaded_lib == 'libbitsandbytes_cuda122.so'
===========changed ref 2===========
# module: bitsandbytes.functional
def create_normal_map(offset=0.9677083, use_extra_value=True):
+ from scipy.stats import norm
+
if use_extra_value:
# one more positive value, this is an asymmetric type
v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
else:
v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
v = v1 + v2 + v3
values = torch.Tensor(v)
values = values.sort().values
values /= values.max()
assert values.numel() == 256
return values
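For reference, a minimal sketch (not part of the repository; dimensions, scales and seed are arbitrary) of the zero-point identity that test_zeropoint verifies: quantize both operands as C = q*X + zp, multiply in the shifted domain, then undo the shift with the row/column-sum correction terms before dividing by the product of the scales.

import torch

torch.manual_seed(0)
A = torch.randn(4, 8)
B = torch.randn(8, 3)
qa, zpa = 2.0, 1.0   # scale and zero-point for A
qb, zpb = 2.0, 1.0   # scale and zero-point for B

CA = qa * A + zpa
CB = qb * B + zpb

C = CA @ CB
C -= (qb * B.sum(0).view(1, -1) * zpa) + (qa * A.sum(1).view(-1, 1) * zpb)
C -= zpa * zpb * A.shape[1]      # correction for the constant zp*zp term
C /= qa * qb                     # back to the unshifted scale

assert torch.allclose(C, A @ B, atol=1e-4)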
|
|
tests.test_functional/test_normal_map_tree
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4870580f17767a4165ec05d954dff2b5c25b694d
|
Fixed bnb input in setup.py. Bumped version for release.
|
<3>:<add> #print(values)
<del> print(values)
<6>:<add> #print(idx)
<del> print(idx)
<11>:<del> print(pivots)
|
# module: tests.test_functional
#print((time.time()-t0)/iters*1e6)
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# torch.matmul(b, a.t())
#torch.cuda.synchronize()
#print((time.time()-t0)/iters*1e6)
def test_normal_map_tree():
<0> code = F.create_normal_map()
<1> values =code[:8].tolist() + code[-8:].tolist()
<2> num_pivots = 1
<3> print(values)
<4> while num_pivots <16:
<5> idx = list(range(16//num_pivots//2, 16, 16//num_pivots))
<6> print(idx)
<7> num_pivots *= 2
<8> pivots = []
<9> for i in idx:
<10> pivots.append((values[i-1]+values[i])/2)
<11> print(pivots)
<12>
|
===========changed ref 0===========
# module: bitsandbytes
if COMPILED_WITH_CUDA:
from .optim import adam
__pdoc__ = {
"libbitsandbytes": False,
"optim.optimizer.Optimizer8bit": False,
"optim.optimizer.MockArgs": False,
}
+ __version__ = "0.42.0"
- __version__ = "0.41.3.post1"
PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
===========changed ref 1===========
# module: tests.test_cuda_setup_evaluator
# hardcoded test. Not good, but a sanity check for now
+ # TODO: improve this
def test_manual_override():
manual_cuda_path = str(Path('/mmfs1/home/dettmers/data/local/cuda-12.2'))
pytorch_version = torch.version.cuda.replace('.', '')
assert pytorch_version != 122
os.environ['CUDA_HOME']='{manual_cuda_path}'
+ os.environ['BNB_CUDA_VERSION']='122'
- os.environ['CUDA_VERSION']='122'
+ #assert str(manual_cuda_path) in os.environ['LD_LIBRARY_PATH']
- assert str(manual_cuda_path) in os.environ['LD_LIBRARY_PATH']
import bitsandbytes as bnb
loaded_lib = bnb.cuda_setup.main.CUDASetup.get_instance().binary_name
- assert loaded_lib == 'libbitsandbytes_cuda122.so'
===========changed ref 2===========
# module: bitsandbytes.functional
def create_normal_map(offset=0.9677083, use_extra_value=True):
+ from scipy.stats import norm
+
if use_extra_value:
# one more positive value, this is an asymmetric type
v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
else:
v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
v = v1 + v2 + v3
values = torch.Tensor(v)
values = values.sort().values
values /= values.max()
assert values.numel() == 256
return values
===========changed ref 3===========
<s> #torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# linear8bit_train(A)
#torch.cuda.synchronize()
#print( f"bnb linear8bitlt with threshold (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
def test_zeropoint():
def quant_zp(x):
dtype = x.dtype
x = x.float()
dyna = x.max() - x.min()
if dyna == 0:
dyna = 1
qx = 254.0 / dyna
minx = x.min()
# zpx = torch.round(minx* qx)
# zpx = 127 - torch.round(x.max()* qx)
zpx = torch.round(x.min() * qx) - 127
x = (qx * x) + zpx
return x, qx, zpx
batch = 2
seq = 512
model = 1024
hidden = 4 * model
A = torch.randn(batch * seq, model, device="cuda").half() * 0.1
B = torch.randn(model, hidden, device="cuda").half() * 0.1
C0 = torch.matmul(A, B)
# A, SA = F.vectorwise_quant(A, quant_type='linear')
# B, SB = F.vectorwise_quant(B, quant_type='linear')
A = A.float()
B = B.float()
C1 = torch.matmul(A, B)
C3 = bnb.matmul(A.half(), B.t().contiguous().half())
zp = 1
# C2 = torch.matmul(A-zp, B)
# C2 += B.sum(0).view(1, -1)*zp</s>
===========changed ref 4===========
<s>cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# linear8bit_train(A)
#torch.cuda.synchronize()
#print( f"bnb linear8bitlt with threshold (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s")
def test_zeropoint():
# offset: 1
<s>mul(A-zp, B)
# C2 += B.sum(0).view(1, -1)*zp
C2 = torch.matmul(A, B - zp)
C2 -= A.sum(1).view(-1, 1) * zp
ca, cqa, cza = quant_zp(A)
+ #print(ca.min(), ca.max())
- print(ca.min(), ca.max())
+ #print((ca - cza).min(), (ca - cza).max())
- print((ca - cza).min(), (ca - cza).max())
zp = 1
scale = 2.0
C5 = torch.matmul((A * scale) - zp, B)
C5 += B.sum(0) * zp
C5 /= scale
CA, qa, zpa = quant_zp(A)
C4 = torch.matmul(CA, B)
C4 -= B.sum(0) * zpa
C4 /= qa
zpb = 1
zpa = 1
qa = 2
qb = 2
C6 = torch.matmul((A * qa) + zpa, (B * qb) + zpb)
C6 -= (qb * B.sum(0).view(1, -1) * zpa) + (qa * A.sum(1).view(-1, 1)</s>
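A minimal, illustrative sketch (the codebook below is a stand-in, not the real NF4 values) of the structure test_normal_map_tree inspects: for a sorted 16-entry codebook, the midpoints between neighbouring code values act as the decision boundaries of a balanced binary search tree, so quantizing a scalar reduces to locating it among the pivots and picking the nearest code.

import torch

code = torch.linspace(-1.0, 1.0, 16)       # stand-in for the 16 code values the test extracts
pivots = (code[:-1] + code[1:]) / 2        # 15 decision boundaries between neighbouring codes

def nearest_code_index(x):
    # number of pivots below x == index of the nearest code value
    return int(torch.searchsorted(pivots, torch.tensor(x)))

assert nearest_code_index(-2.0) == 0
assert nearest_code_index(2.0) == 15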
|
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4870580f17767a4165ec05d954dff2b5c25b694d
|
Fixed bnb input in setup.py. Bumped version for release.
|
<s>4', 'fp4'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
<0> for dim in [128, 256, 512, 1024]:
<1> #for dim in [4*1024]:
<2> #for dim in [1*16]:
<3> errs1 = []
<4> errs2 = []
<5> errs3 = []
<6> relerrs1 = []
<7> relerrs2 = []
<8> relerrs3 = []
<9> max_errs1 = []
<10> max_errs2 = []
<11> max_errs3 = []
<12>
<13>
<14> for i in range(100):
<15> if kind == 'fc1':
<16> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<17> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<18> elif kind == 'fc2':
<19> A = torch.randn(1, 4*dim, dtype=dtype, device='cuda')
<20> B = torch.randn(dim, 4*dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<21> elif kind == 'attn':
<22> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<23> B = torch.randn(dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<24> elif kind == 'attn_packed':
<25> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<26> B = torch.randn(dim*3, dim, dtype=dtype, device</s>
|
===========below chunk 0===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 1
qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=double_quant)
C3 = torch.matmul(A, B.t())
C2 = F.gemv_4bit(A, qB.t(), state=state)
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
err1 = (C1-C2).abs().float()
err2 = (C3-C2).abs().float()
err3 = (C3-C1).abs().float()
mag1 = torch.abs(C1).float()+1e-5
mag2 = torch.abs(C3).float()+1e-5
mag3 = torch.abs(C3).float()+1e-5
relerr1 = err1/mag1
relerr2 = err2/mag2
relerr3 = err3/mag3
max_err1 = err1.max()
max_err2 = err2.max()
max_err3 = err3.max()
errs1.append(err1.mean().item())
errs2.append(err2.mean().item())
errs3.append(err3.mean().item())
relerrs1.append(relerr1.mean().item())
relerrs2.append(relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_</s>
===========below chunk 1===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 2
<s>relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_errs1.append(max_err1.item())
max_errs2.append(max_err2.item())
max_errs3.append(max_err3.item())
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
err1 = sum(errs1)/len(errs1)/math.sqrt(dim)
err2 = sum(errs2)/len(errs2)/math.sqrt(dim)
err3 = sum(errs3)/len(errs3)/math.sqrt(dim)
relerr1 = sum(relerrs1)/len(relerrs1)/math.sqrt(dim)
relerr2 = sum(relerrs2)/len(relerrs2)/math.sqrt(dim)
relerr3 = sum(relerrs3)/len(relerrs3)/math.sqrt(dim)
maxerr1 = sum(max_errs1)/len(max_errs1)/math.sqrt(dim)
maxerr2 = sum(max_errs2)/len(max_errs2)/math.sqrt(dim)
maxerr3 = sum(max</s>
===========below chunk 2===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 3
<s>s3)/len(max_errs3)/math.sqrt(dim)
absratio = err2/err3
relratio = relerr2/relerr3
maxratio = relerr2/relerr3
# for debugging if the tests fails
#
#print('='*80)
#print(f'For matmul: {A.shape}, {B.shape}, {kind}, {dtype}, {storage_type}, double_quant={double_quant}:')
print(C1.flatten()[-20:])
print(C2.flatten()[-20:])
print(f'inference vs training abs: {err1}')
print(f'inference vs training rel: {relerr1}')
print(f'inference vs training max: {maxerr1}')
#print(f'inference vs training vs torch err ratio abs: {absratio}')
#print(f'inference vs training vs torch err ratio rel: {relratio}')
#print(f'inference vs training vs torch err ratio max: {maxratio}')
if dtype == torch.float16:
if dim <= 512:
assert err1 < 7e-5
assert relerr1 < 0.0008
else:
assert err1 < 6e-5
assert relerr1 < 2e-4
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.005 and rel</s>
===========below chunk 3===========
<s>'])
@pytest.mark.parametrize("kind", ['fc1', 'fc2', 'attn', 'attn_packed'], ids=['fc1', 'fc2', 'attn', 'attn_packed'])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 4
<s> 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.float32:
if dim <= 512:
assert err1 < 5e-8
assert relerr1 < 1e-6
assert maxerr1 < 1e-7
else:
assert err1 < 5e-8
assert relerr1 < 8e-6
assert maxerr1 < 1e-7
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.005 and relratio > 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.bfloat16:
if dim <= 512:
assert err1 < 6e-4
assert relerr1 < 0.007
assert maxerr1 < 0.015
else:
assert err1 < 2e-4
assert relerr1 < 0.002
assert maxerr1 < 0.0012
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.04 and relratio > 0.96
assert maxratio < 1.02 and maxratio > 0.98
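A minimal usage sketch (assuming a CUDA device and an installed bitsandbytes; shapes are arbitrary) of the inference path exercised by test_gemv_4bit: quantize the weight once with quantize_4bit, then multiply the batch-size-1 activation against the packed weight with matmul_4bit.

import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

A = torch.randn(1, 512, dtype=torch.float16, device='cuda')        # activation
B = torch.randn(2048, 512, dtype=torch.float16, device='cuda')     # weight
qB, state = F.quantize_4bit(B, quant_type='nf4', compress_statistics=True)
out = bnb.matmul_4bit(A, qB.t(), state)                            # approximates A @ B.t()
ref = torch.matmul(A, B.t())
print((out - ref).abs().mean().item())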
|
|
tests.test_modules/test_kbit_backprop
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
4870580f17767a4165ec05d954dff2b5c25b694d
|
Fixed bnb input in setup.py. Bumped version for release.
|
# module: tests.test_modules
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize("module", modules, ids=names)
def test_kbit_backprop(module):
<0> b = 17
<1> dim1 = 37
<2> dim2 = 83
<3>
<4> ref = nn.Sequential(*[torch.nn.Linear(dim1, dim2), torch.nn.Linear(dim2, 10)])
<5> ref[1].weight.requires_grad = False
<6> torch.nn.init.kaiming_normal_(ref[0].weight)
<7> torch.nn.init.kaiming_normal_(ref[1].weight)
<8> kbit = nn.Sequential(*[torch.nn.Linear(dim1, dim2), module(dim2, 10)])
<9> kbit[0].weight.detach().copy_(ref[0].weight)
<10> kbit[1].weight.detach().copy_(ref[1].weight)
<11> kbit[0].bias.detach().copy_(ref[0].bias)
<12> kbit[1].bias.detach().copy_(ref[1].bias)
<13> ref = ref.half().cuda()
<14> kbit = kbit.half().cuda()
<15> kbit = kbit.half().to('cuda')
<16>
<17> errs1 = []
<18> errs2 = []
<19> relerrs1 = []
<20> relerrs2 = []
<21> for i in range(100):
<22> batch = torch.randn(b, dim1).half().cuda()
<23> out1 = ref(batch)
<24> out2 = kbit(batch)
<25> out1.mean().backward()
<26> out2.mean().backward()
<27>
<28> grad1 = ref[0].weight.grad
<29> grad2 = kbit[0].weight.grad
<30> bgrad1 = ref[0].bias.grad
<31> bgrad2 = kbit[0].bias.grad
<32>
<33> </s>
|
===========below chunk 0===========
# module: tests.test_modules
@pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU")
@pytest.mark.parametrize("module", modules, ids=names)
def test_kbit_backprop(module):
# offset: 1
err2 = (grad1-grad2).abs().float()
relerr1 = (err1/(out1.abs().float()+1e-9))
relerr2 = (err2/(grad1.abs().float()+1e-9))
errs1.append(err1.mean().item())
errs2.append(err2.mean().item())
relerrs1.append(relerr1.mean().item())
relerrs2.append(relerr2.mean().item())
if isinstance(module, bnb.nn.Linear8bitLt):
assert_all_approx_close(grad1, grad2, atol=0.008, rtol=0.05, count=1)
torch.testing.assert_close(bgrad1, bgrad2, atol=0.008, rtol=0.05)
else:
assert_all_approx_close(grad1, grad2, atol=0.015, rtol=0.05, count=1)
torch.testing.assert_close(bgrad1, bgrad2, atol=0.02, rtol=0.05)
ref.zero_grad()
kbit.zero_grad()
assert kbit[0].weight.grad is None or kbit[0].weight.grad.sum().item() == 0
assert kbit[0].weight.grad is None or kbit[0].bias.grad.sum().item() == 0
print('out', sum(errs1)/len(errs1))
print('grad', sum(errs2)/len(errs2))
print('rel out', sum(relerrs1)/len(relerrs1))
print('rel grad', sum(relerrs2)/len(relerrs2))
===========changed ref 0===========
# module: setup
- VERSION = bnb.__version__
-
libs = list(glob.glob("./bitsandbytes/libbitsandbytes*.so"))
libs = [os.path.basename(p) for p in libs]
print("libs:", libs)
===========changed ref 1===========
# module: bitsandbytes
if COMPILED_WITH_CUDA:
from .optim import adam
__pdoc__ = {
"libbitsandbytes": False,
"optim.optimizer.Optimizer8bit": False,
"optim.optimizer.MockArgs": False,
}
+ __version__ = "0.42.0"
- __version__ = "0.41.3.post1"
PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
===========changed ref 2===========
# module: tests.test_functional
#print((time.time()-t0)/iters*1e6)
#torch.cuda.synchronize()
#t0 = time.time()
#for i in range(iters):
# torch.matmul(b, a.t())
#torch.cuda.synchronize()
#print((time.time()-t0)/iters*1e6)
def test_normal_map_tree():
code = F.create_normal_map()
values =code[:8].tolist() + code[-8:].tolist()
num_pivots = 1
+ #print(values)
- print(values)
while num_pivots <16:
idx = list(range(16//num_pivots//2, 16, 16//num_pivots))
+ #print(idx)
- print(idx)
num_pivots *= 2
pivots = []
for i in idx:
pivots.append((values[i-1]+values[i])/2)
- print(pivots)
===========changed ref 3===========
# module: setup
setup(
name=f"bitsandbytes",
+ version="0.42.0",
- version=VERSION,
author="Tim Dettmers",
author_email="[email protected]",
description="k-bit optimizers and matrix multiplication routines.",
license="MIT",
keywords="gpu optimizers optimization 8-bit quantization compression",
url="https://github.com/TimDettmers/bitsandbytes",
install_requires=['scipy'],
packages=find_packages(),
package_data={"": libs},
long_description=read("README.md"),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
===========changed ref 4===========
# module: tests.test_cuda_setup_evaluator
# hardcoded test. Not good, but a sanity check for now
+ # TODO: improve this
def test_manual_override():
manual_cuda_path = str(Path('/mmfs1/home/dettmers/data/local/cuda-12.2'))
pytorch_version = torch.version.cuda.replace('.', '')
assert pytorch_version != 122
os.environ['CUDA_HOME']='{manual_cuda_path}'
+ os.environ['BNB_CUDA_VERSION']='122'
- os.environ['CUDA_VERSION']='122'
+ #assert str(manual_cuda_path) in os.environ['LD_LIBRARY_PATH']
- assert str(manual_cuda_path) in os.environ['LD_LIBRARY_PATH']
import bitsandbytes as bnb
loaded_lib = bnb.cuda_setup.main.CUDASetup.get_instance().binary_name
- assert loaded_lib == 'libbitsandbytes_cuda122.so'
===========changed ref 5===========
# module: bitsandbytes.functional
def create_normal_map(offset=0.9677083, use_extra_value=True):
+ from scipy.stats import norm
+
if use_extra_value:
# one more positive value, this is an asymmetric type
v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
else:
v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
v = v1 + v2 + v3
values = torch.Tensor(v)
values = values.sort().values
values /= values.max()
assert values.numel() == 256
return values
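A minimal sketch (a simplified re-implementation for illustration, not the helper defined in the test suite) of the tolerance pattern test_kbit_backprop relies on: allow up to `count` elements to violate the closeness check before failing, which absorbs the occasional outlier produced by quantized gradients.

import torch

def assert_all_approx_close(a, b, atol, rtol, count):
    mismatched = (~torch.isclose(a, b, rtol=rtol, atol=atol)).sum().item()
    if mismatched > count:
        raise AssertionError(f'{mismatched} elements differ (allowed {count})')

assert_all_approx_close(torch.ones(10), torch.ones(10) * 1.001, atol=0.01, rtol=0.0, count=1)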
|
|
bitsandbytes.utils/unpack_tensor_to_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
d05b508356914ed8a576b9ec78708cd910529d34
|
Move to CPU before attempting to convert to numpy. (#922)
|
<9>:<add> json_bytes = bytes(tensor_data.cpu().numpy())
<del> json_bytes = bytes(tensor_data.numpy())
|
# module: bitsandbytes.utils
def unpack_tensor_to_dict(tensor_data):
<0> """
<1> Unpack a torch tensor into a Python dictionary.
<2>
<3> Parameters:
<4> - tensor_data: The torch tensor containing the packed data.
<5>
<6> Returns:
<7> A Python dictionary containing the unpacked data.
<8> """
<9> json_bytes = bytes(tensor_data.numpy())
<10> json_str = json_bytes.decode('utf-8')
<11> unpacked_dict = json.loads(json_str)
<12>
<13> return unpacked_dict
<14>
|
===========unchanged ref 0===========
at: json
loads(s: Union[str, bytes], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any
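A minimal sketch (illustrative; assumes a CUDA device) of the failure mode this commit addresses: Tensor.numpy() is only defined for CPU tensors, so a packed tensor that lives on the GPU has to be copied back with .cpu() before its buffer can be turned into bytes.

import torch

t = torch.tensor(list(b'{"a": 1}'), dtype=torch.uint8, device='cuda')
# bytes(t.numpy())             # would raise: can't convert a CUDA tensor to numpy
data = bytes(t.cpu().numpy())  # copy to host first, then view the buffer as bytes
print(data.decode('utf-8'))    # {"a": 1}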
|
bitsandbytes.nn.modules/Params4bit.__new__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<8>:<add> self.quant_storage = quant_storage
<add> self.bnb_quantized = bnb_quantized
<9>:<add> self.module = module
|
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
<0> if data is None:
<1> data = torch.empty(0)
<2>
<3> self = torch.Tensor._make_subclass(cls, data, requires_grad)
<4> self.blocksize = blocksize
<5> self.compress_statistics = compress_statistics
<6> self.quant_type = quant_type
<7> self.quant_state = quant_state
<8> self.data = data
<9> return self
<10>
| |
bitsandbytes.nn.modules/Params4bit.from_prequantized
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<6>:<add> self.bnb_quantized = True
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
<0> self = torch.Tensor._make_subclass(cls, data.to(device))
<1> self.requires_grad = requires_grad
<2> self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
<3> self.blocksize = self.quant_state.blocksize
<4> self.compress_statistics = self.quant_state.nested
<5> self.quant_type = self.quant_state.quant_type
<6> return self
<7>
|
===========changed ref 0===========
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
if data is None:
data = torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.blocksize = blocksize
self.compress_statistics = compress_statistics
self.quant_type = quant_type
self.quant_state = quant_state
+ self.quant_storage = quant_storage
+ self.bnb_quantized = bnb_quantized
self.data = data
+ self.module = module
return self
|
bitsandbytes.nn.modules/Params4bit.cuda
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<0>:<del> w = self.data.contiguous().half().cuda(device)
<1>:<del> w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
<2>:<del> self.data = w_4bit
<3>:<del> self.quant_state = quant_state
<4>:<add> return self.to(device='cuda' if device is None else device, non_blocking=non_blocking)
<5>:<del> return self
<6>:<del>
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
- def cuda(self, device):
<0> w = self.data.contiguous().half().cuda(device)
<1> w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
<2> self.data = w_4bit
<3> self.quant_state = quant_state
<4>
<5> return self
<6>
|
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
+ self.bnb_quantized = True
return self
===========changed ref 1===========
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
if data is None:
data = torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.blocksize = blocksize
self.compress_statistics = compress_statistics
self.quant_type = quant_type
self.quant_state = quant_state
+ self.quant_storage = quant_storage
+ self.bnb_quantized = bnb_quantized
self.data = data
+ self.module = module
return self
|
bitsandbytes.nn.modules/Params4bit.to
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<2>:<add> if (device is not None and device.type == "cuda" and not self.bnb_quantized):
<del> if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
<3>:<add> return self._quantize(device)
<del> return self.cuda(device)
|
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
<0> device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
<1>
<2> if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
<3> return self.cuda(device)
<4> else:
<5> if self.quant_state is not None:
<6> self.quant_state.to(device)
<7>
<8> new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
<9> requires_grad=self.requires_grad, quant_state=self.quant_state,
<10> blocksize=self.blocksize, compress_statistics=self.compress_statistics,
<11> quant_type=self.quant_type)
<12>
<13> return new_param
<14>
|
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
- def cuda(self, device):
- w = self.data.contiguous().half().cuda(device)
- w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
- self.data = w_4bit
- self.quant_state = quant_state
+ return self.to(device='cuda' if device is None else device, non_blocking=non_blocking)
- return self
-
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def _quantize(self, device):
+ w = self.data.contiguous().cuda(device)
+ w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics,
+ quant_type=self.quant_type, quant_storage=self.quant_storage)
+ self.data = w_4bit
+ self.quant_state = quant_state
+ if self.module is not None:
+ self.module.quant_state = quant_state
+ self.bnb_quantized = True
+ return self
+
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
+ self.bnb_quantized = True
return self
===========changed ref 3===========
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
if data is None:
data = torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.blocksize = blocksize
self.compress_statistics = compress_statistics
self.quant_type = quant_type
self.quant_state = quant_state
+ self.quant_storage = quant_storage
+ self.bnb_quantized = bnb_quantized
self.data = data
+ self.module = module
return self
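A minimal usage sketch (assuming a CUDA device; shape and dtype are arbitrary) of the lazy-quantization behaviour these changes introduce: a Params4bit starts out holding ordinary floating-point data and is only packed to 4 bits the first time it is moved to a CUDA device, with bnb_quantized guarding against quantizing the already-packed payload a second time.

import torch
import bitsandbytes as bnb

w = torch.randn(64, 64, dtype=torch.float16)
p = bnb.nn.Params4bit(w, requires_grad=False, quant_type='nf4')
print(p.bnb_quantized)                                # False: still plain fp16 data
p = p.to('cuda')                                      # triggers _quantize(); data becomes packed storage
print(p.bnb_quantized, p.quant_state is not None)     # True True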
|
bitsandbytes.nn.modules/Linear4bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<1>:<add> self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type, quant_storage=quant_storage, module=self)
<del> self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
<5>:<add> self.quant_state = None
<add> self.quant_storage = quant_storage
|
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None):
<0> super().__init__(input_features, output_features, bias, device)
<1> self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
<2> # self.persistent_buffers = [] # TODO consider as way to save quant state
<3> self.compute_dtype = compute_dtype
<4> self.compute_type_is_set = False
<5>
|
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def _quantize(self, device):
+ w = self.data.contiguous().cuda(device)
+ w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics,
+ quant_type=self.quant_type, quant_storage=self.quant_storage)
+ self.data = w_4bit
+ self.quant_state = quant_state
+ if self.module is not None:
+ self.module.quant_state = quant_state
+ self.bnb_quantized = True
+ return self
+
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
- def cuda(self, device):
- w = self.data.contiguous().half().cuda(device)
- w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
- self.data = w_4bit
- self.quant_state = quant_state
+ return self.to(device='cuda' if device is None else device, non_blocking=non_blocking)
- return self
-
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
+ self.bnb_quantized = True
return self
===========changed ref 3===========
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
if data is None:
data = torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.blocksize = blocksize
self.compress_statistics = compress_statistics
self.quant_type = quant_type
self.quant_state = quant_state
+ self.quant_storage = quant_storage
+ self.bnb_quantized = bnb_quantized
self.data = data
+ self.module = module
return self
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
+ if (device is not None and device.type == "cuda" and not self.bnb_quantized):
- if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
+ return self._quantize(device)
- return self.cuda(device)
else:
if self.quant_state is not None:
self.quant_state.to(device)
new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
requires_grad=self.requires_grad, quant_state=self.quant_state,
blocksize=self.blocksize, compress_statistics=self.compress_statistics,
quant_type=self.quant_type)
return new_param
|
bitsandbytes.nn.modules/Linear4bit.forward
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<5>:<add> if getattr(self, 'quant_state', None) is not None:
<add> # the quant state got lost when the parameter got converted. This happens for example for fsdp
<add> # since we registered the module, we can recover the state here
<add> assert self.weight.shape[1] == 1
<add> if not isinstance(self.weight, Params4bit):
<add> self.weight = Params4bit(self.weight, quant_storage=self.quant_storage)
<add> self.weight.quant_state = self.quant_state
<add> else:
<add> print('FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.')
<del> print('FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.')
|
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def forward(self, x: torch.Tensor):
<0> # weights are cast automatically as Int8Params, but the bias has to be cast manually
<1> if self.bias is not None and self.bias.dtype != x.dtype:
<2> self.bias.data = self.bias.data.to(x.dtype)
<3>
<4> if getattr(self.weight, 'quant_state', None) is None:
<5> print('FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.')
<6> if not self.compute_type_is_set:
<7> self.set_compute_type(x)
<8> self.compute_type_is_set = True
<9>
<10> inp_dtype = x.dtype
<11> if self.compute_dtype is not None:
<12> x = x.to(self.compute_dtype)
<13>
<14> bias = None if self.bias is None else self.bias.to(self.compute_dtype)
<15> out = bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state)
<16>
<17> out = out.to(inp_dtype)
<18>
<19> return out
<20>
|
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def _quantize(self, device):
+ w = self.data.contiguous().cuda(device)
+ w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics,
+ quant_type=self.quant_type, quant_storage=self.quant_storage)
+ self.data = w_4bit
+ self.quant_state = quant_state
+ if self.module is not None:
+ self.module.quant_state = quant_state
+ self.bnb_quantized = True
+ return self
+
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
- def cuda(self, device):
- w = self.data.contiguous().half().cuda(device)
- w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
- self.data = w_4bit
- self.quant_state = quant_state
+ return self.to(device='cuda' if device is None else device, non_blocking=non_blocking)
- return self
-
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
+ self.bnb_quantized = True
return self
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None):
super().__init__(input_features, output_features, bias, device)
+ self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type, quant_storage=quant_storage, module=self)
- self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
# self.persistent_buffers = [] # TODO consider as way to save quant state
self.compute_dtype = compute_dtype
self.compute_type_is_set = False
+ self.quant_state = None
+ self.quant_storage = quant_storage
===========changed ref 4===========
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
if data is None:
data = torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.blocksize = blocksize
self.compress_statistics = compress_statistics
self.quant_type = quant_type
self.quant_state = quant_state
+ self.quant_storage = quant_storage
+ self.bnb_quantized = bnb_quantized
self.data = data
+ self.module = module
return self
===========changed ref 5===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
+ if (device is not None and device.type == "cuda" and not self.bnb_quantized):
- if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
+ return self._quantize(device)
- return self.cuda(device)
else:
if self.quant_state is not None:
self.quant_state.to(device)
new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
requires_grad=self.requires_grad, quant_state=self.quant_state,
blocksize=self.blocksize, compress_statistics=self.compress_statistics,
quant_type=self.quant_type)
return new_param
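A minimal usage sketch (assuming a CUDA device; layer sizes are arbitrary) of the Linear4bit module whose forward pass is changed above: the weight is quantized when the layer is moved to the GPU, and forward() then runs matmul_4bit against the packed weight using the stored quant_state.

import torch
import bitsandbytes as bnb

layer = bnb.nn.Linear4bit(128, 256, bias=True, quant_type='nf4', compute_dtype=torch.float16)
layer = layer.to('cuda')                                    # Params4bit weight is quantized here
x = torch.randn(4, 128, dtype=torch.float16, device='cuda')
y = layer(x)
print(y.shape, layer.weight.quant_state is not None)        # torch.Size([4, 256]) True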
|
bitsandbytes.nn.modules/LinearFP4.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<0>:<add> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', quant_storage, device)
<del> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
|
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
<0> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
<1>
|
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def _quantize(self, device):
+ w = self.data.contiguous().cuda(device)
+ w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics,
+ quant_type=self.quant_type, quant_storage=self.quant_storage)
+ self.data = w_4bit
+ self.quant_state = quant_state
+ if self.module is not None:
+ self.module.quant_state = quant_state
+ self.bnb_quantized = True
+ return self
+
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
- def cuda(self, device):
- w = self.data.contiguous().half().cuda(device)
- w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
- self.data = w_4bit
- self.quant_state = quant_state
+ return self.to(device='cuda' if device is None else device, non_blocking=non_blocking)
- return self
-
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
+ self.bnb_quantized = True
return self
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None):
super().__init__(input_features, output_features, bias, device)
+ self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type, quant_storage=quant_storage, module=self)
- self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
# self.persistent_buffers = [] # TODO consider as way to save quant state
self.compute_dtype = compute_dtype
self.compute_type_is_set = False
+ self.quant_state = None
+ self.quant_storage = quant_storage
===========changed ref 4===========
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
if data is None:
data = torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.blocksize = blocksize
self.compress_statistics = compress_statistics
self.quant_type = quant_type
self.quant_state = quant_state
+ self.quant_storage = quant_storage
+ self.bnb_quantized = bnb_quantized
self.data = data
+ self.module = module
return self
===========changed ref 5===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
+ if (device is not None and device.type == "cuda" and not self.bnb_quantized):
- if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
+ return self._quantize(device)
- return self.cuda(device)
else:
if self.quant_state is not None:
self.quant_state.to(device)
new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
requires_grad=self.requires_grad, quant_state=self.quant_state,
blocksize=self.blocksize, compress_statistics=self.compress_statistics,
quant_type=self.quant_type)
return new_param
===========changed ref 6===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def forward(self, x: torch.Tensor):
# weights are cast automatically as Int8Params, but the bias has to be cast manually
if self.bias is not None and self.bias.dtype != x.dtype:
self.bias.data = self.bias.data.to(x.dtype)
if getattr(self.weight, 'quant_state', None) is None:
+ if getattr(self, 'quant_state', None) is not None:
+ # the quant state got lost when the parameter got converted. This happens for example for fsdp
+ # since we registered the module, we can recover the state here
+ assert self.weight.shape[1] == 1
+ if not isinstance(self.weight, Params4bit):
+ self.weight = Params4bit(self.weight, quant_storage=self.quant_storage)
+ self.weight.quant_state = self.quant_state
+ else:
+ print('FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.')
- print('FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.')
if not self.compute_type_is_set:
self.set_compute_type(x)
self.compute_type_is_set = True
inp_dtype = x.dtype
if self.compute_dtype is not None:
x = x.to(self.compute_dtype)
bias = None if self.bias is None else self.bias.to(self.compute_dtype)
out = bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state)
out = out.to(inp_dtype)
return out
|
bitsandbytes.nn.modules/LinearNF4.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<0>:<add> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', quant_storage, device)
<del> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
|
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
<0> super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
<1>
|
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', quant_storage, device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def _quantize(self, device):
+ w = self.data.contiguous().cuda(device)
+ w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics,
+ quant_type=self.quant_type, quant_storage=self.quant_storage)
+ self.data = w_4bit
+ self.quant_state = quant_state
+ if self.module is not None:
+ self.module.quant_state = quant_state
+ self.bnb_quantized = True
+ return self
+
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
- def cuda(self, device):
- w = self.data.contiguous().half().cuda(device)
- w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
- self.data = w_4bit
- self.quant_state = quant_state
+ return self.to(device='cuda' if device is None else device, non_blocking=non_blocking)
- return self
-
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
+ self.bnb_quantized = True
return self
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None):
super().__init__(input_features, output_features, bias, device)
+ self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type, quant_storage=quant_storage, module=self)
- self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
# self.persistent_buffers = [] # TODO consider as way to save quant state
self.compute_dtype = compute_dtype
self.compute_type_is_set = False
+ self.quant_state = None
+ self.quant_storage = quant_storage
===========changed ref 5===========
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
if data is None:
data = torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.blocksize = blocksize
self.compress_statistics = compress_statistics
self.quant_type = quant_type
self.quant_state = quant_state
+ self.quant_storage = quant_storage
+ self.bnb_quantized = bnb_quantized
self.data = data
+ self.module = module
return self
===========changed ref 6===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
+ if (device is not None and device.type == "cuda" and not self.bnb_quantized):
- if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
+ return self._quantize(device)
- return self.cuda(device)
else:
if self.quant_state is not None:
self.quant_state.to(device)
new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
requires_grad=self.requires_grad, quant_state=self.quant_state,
blocksize=self.blocksize, compress_statistics=self.compress_statistics,
quant_type=self.quant_type)
return new_param
|
bitsandbytes.functional/quantize_fp4
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<0>:<add> return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4', quant_storage)
<del> return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
|
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_storage=torch.uint8):
- def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
<0> return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
<1>
|
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', quant_storage, device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', quant_storage, device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
+ self.bnb_quantized = True
return self
===========changed ref 3===========
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
if data is None:
data = torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.blocksize = blocksize
self.compress_statistics = compress_statistics
self.quant_type = quant_type
self.quant_state = quant_state
+ self.quant_storage = quant_storage
+ self.bnb_quantized = bnb_quantized
self.data = data
+ self.module = module
return self
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
- def cuda(self, device):
- w = self.data.contiguous().half().cuda(device)
- w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
- self.data = w_4bit
- self.quant_state = quant_state
+ return self.to(device='cuda' if device is None else device, non_blocking=non_blocking)
- return self
-
===========changed ref 5===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def _quantize(self, device):
+ w = self.data.contiguous().cuda(device)
+ w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics,
+ quant_type=self.quant_type, quant_storage=self.quant_storage)
+ self.data = w_4bit
+ self.quant_state = quant_state
+ if self.module is not None:
+ self.module.quant_state = quant_state
+ self.bnb_quantized = True
+ return self
+
===========changed ref 6===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None):
super().__init__(input_features, output_features, bias, device)
+ self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type, quant_storage=quant_storage, module=self)
- self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
# self.persistent_buffers = [] # TODO consider as way to save quant state
self.compute_dtype = compute_dtype
self.compute_type_is_set = False
+ self.quant_state = None
+ self.quant_storage = quant_storage
===========changed ref 7===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
+ if (device is not None and device.type == "cuda" and not self.bnb_quantized):
- if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
+ return self._quantize(device)
- return self.cuda(device)
else:
if self.quant_state is not None:
self.quant_state.to(device)
new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
requires_grad=self.requires_grad, quant_state=self.quant_state,
blocksize=self.blocksize, compress_statistics=self.compress_statistics,
quant_type=self.quant_type)
return new_param
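To make the lazy-quantization path shown in these refs concrete, here is a minimal usage sketch (an editor's illustration assuming this commit's API, not part of the diff): the weight keeps its original dtype until the module is moved to a CUDA device, at which point Params4bit._quantize packs it and mirrors the resulting QuantState onto the owning Linear4bit via module=self.
import torch
import bitsandbytes as bnb

layer = bnb.nn.Linear4bit(128, 256, quant_type='nf4', quant_storage=torch.uint8)
layer = layer.to('cuda')  # Params4bit.to() sees bnb_quantized == False and calls _quantize()
x = torch.randn(4, 128, dtype=torch.float16, device='cuda')
y = layer(x)              # forward consumes layer.quant_state populated during _quantize()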
|
bitsandbytes.functional/quantize_nf4
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<0>:<add> return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4', quant_storage)
<del> return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
|
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_storage=torch.uint8):
- def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
<0> return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
<1>
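For orientation, a short usage sketch of the wrapper above (assumed from this commit's signatures, not part of the diff): quantize_nf4 simply forwards to quantize_4bit with quant_type='nf4', and the new quant_storage argument selects the dtype of the tensor that holds the packed nibbles.
import torch
import bitsandbytes.functional as F

W = torch.randn(1024, 1024, dtype=torch.float16, device='cuda')
packed, state = F.quantize_nf4(W, blocksize=64, quant_storage=torch.uint8)
W_approx = F.dequantize_nf4(packed, state)  # approximate reconstruction of W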
|
===========changed ref 0===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_storage=torch.uint8):
- def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4', quant_storage)
- return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', quant_storage, device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', quant_storage, device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
+ self.bnb_quantized = True
return self
===========changed ref 4===========
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
if data is None:
data = torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.blocksize = blocksize
self.compress_statistics = compress_statistics
self.quant_type = quant_type
self.quant_state = quant_state
+ self.quant_storage = quant_storage
+ self.bnb_quantized = bnb_quantized
self.data = data
+ self.module = module
return self
===========changed ref 5===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
- def cuda(self, device):
- w = self.data.contiguous().half().cuda(device)
- w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
- self.data = w_4bit
- self.quant_state = quant_state
+ return self.to(device='cuda' if device is None else device, non_blocking=non_blocking)
- return self
-
===========changed ref 6===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def _quantize(self, device):
+ w = self.data.contiguous().cuda(device)
+ w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics,
+ quant_type=self.quant_type, quant_storage=self.quant_storage)
+ self.data = w_4bit
+ self.quant_state = quant_state
+ if self.module is not None:
+ self.module.quant_state = quant_state
+ self.bnb_quantized = True
+ return self
+
===========changed ref 7===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None):
super().__init__(input_features, output_features, bias, device)
+ self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type, quant_storage=quant_storage, module=self)
- self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
# self.persistent_buffers = [] # TODO consider as way to save quant state
self.compute_dtype = compute_dtype
self.compute_type_is_set = False
+ self.quant_state = None
+ self.quant_storage = quant_storage
===========changed ref 8===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
def to(self, *args, **kwargs):
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
+ if (device is not None and device.type == "cuda" and not self.bnb_quantized):
- if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
+ return self._quantize(device)
- return self.cuda(device)
else:
if self.quant_state is not None:
self.quant_state.to(device)
new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
requires_grad=self.requires_grad, quant_state=self.quant_state,
blocksize=self.blocksize, compress_statistics=self.compress_statistics,
quant_type=self.quant_type)
return new_param
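As a hedged illustration of why quant_storage is threaded through Linear4bit/LinearNF4 (an assumption based on the FSDP motivation of this commit, not stated in the diff): keeping the packed payload in the same dtype as the rest of the model lets FSDP flatten every parameter into one uniform buffer.
import torch
import bitsandbytes as bnb

# hypothetical setup: bf16 model, so the 4-bit payload is also stored as bf16 words
layer = bnb.nn.LinearNF4(4096, 4096, compute_dtype=torch.bfloat16,
                         quant_storage=torch.bfloat16)
layer = layer.to('cuda')
print(layer.weight.dtype)  # torch.bfloat16 container around the packed 4-bit data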
|
bitsandbytes.functional/quantize_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<12>:<add> The output tensor.
<del> The output tensor (8-bit).
<21>:<add> Tensor with packed 4-bit values.
<del> The 8-bit tensor with packed 4-bit values.
<40>:<add> mod = dtype2bytes[quant_storage] * 2
<add> out = torch.zeros(((n+1)//mod, 1), dtype=quant_storage, device=A.device)
<del> out
|
# module: bitsandbytes.functional
+ def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4', quant_storage=torch.uint8) -> Tensor:
- def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
<0> """
<1> Quantize tensor A in blocks of 4-bit values.
<2>
<3> Quantizes tensor A by dividing it into blocks which are independently quantized to FP4.
<4>
<5> Parameters
<6> ----------
<7> A : torch.Tensor
<8> The input tensor.
<9> absmax : torch.Tensor
<10> The absmax values.
<11> out : torch.Tensor
<12> The output tensor (8-bit).
<13> blocksize : int
<14> The blocksize used in quantization.
<15> quant_type : str
<16> The 4-bit quantization data type {fp4, nf4}
<17>
<18> Returns
<19> -------
<20> torch.Tensor:
<21> The 8-bit tensor with packed 4-bit values.
<22> tuple(torch.Tensor, torch.Size, torch.dtype, int):
<23> The quantization state to undo the quantization.
<24> """
<25> if A.device.type != 'cuda':
<26> raise NotImplementedError(f'Device type not supported for FP4 quantization: {A.device.type}')
<27> if quant_type not in ['fp4', 'nf4']:
<28> raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')
<29>
<30> n = A.numel()
<31> input_shape = A.shape
<32>
<33> if absmax is None:
<34> blocks = n // blocksize
<35> blocks += 1 if n % blocksize > 0 else 0
<36> absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)
<37>
<38>
<39> if out is None:
<40> out</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
+ def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4', quant_storage=torch.uint8) -> Tensor:
- def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
# offset: 1
assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
prev_device = pre_call(A.device)
is_on_gpu([A, out, absmax])
if A.dtype == torch.float32:
if quant_type == 'fp4':
lib.cquantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
elif A.dtype == torch.float16:
if quant_type == 'fp4':
lib.cquantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
elif A.dtype == torch.bfloat16:
if quant_type == 'fp4':
lib.cquantize_blockwise_bf16_fp4(get_ptr(</s>
===========below chunk 1===========
# module: bitsandbytes.functional
+ def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4', quant_storage=torch.uint8) -> Tensor:
- def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
# offset: 2
<s> if quant_type == 'fp4':
lib.cquantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
lib.cquantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
else:
raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
post_call(A.device)
code = get_4bit_type(quant_type, device=A.device)
if compress_statistics:
offset = absmax.mean()
absmax -= offset
qabsmax, state2 = quantize_blockwise(absmax, blocksize=256)
del absmax
state = QuantState(absmax=qabsmax, shape=input_shape, dtype=A.dtype, blocksize=blocksize, code=code, quant_type=quant_type, offset=offset, state2=state2)
else:
state = QuantState(absmax=absmax, shape=input_shape, dtype=A.dtype, blocksize=blocksize, code=code, quant_type=quant_type, )
return out, state
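A quick sanity check of the storage math introduced above (editor's sketch; the 1/2/4 byte sizes stand in for dtype2bytes): the number of storage elements is (n+1)//(2*bytes_per_element), so the payload stays at roughly n/2 bytes no matter which quant_storage dtype is chosen.
import torch

n = 4096 * 4096  # number of values being quantized
for dtype, nbytes in [(torch.uint8, 1), (torch.float16, 2), (torch.float32, 4)]:
    elements = (n + 1) // (2 * nbytes)         # mirrors mod = dtype2bytes[quant_storage] * 2
    print(dtype, elements, elements * nbytes)  # byte count is ~n/2 for every storage dtype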
===========changed ref 0===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_storage=torch.uint8):
- def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4', quant_storage)
- return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
===========changed ref 1===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_storage=torch.uint8):
- def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4', quant_storage)
- return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', quant_storage, device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', quant_storage, device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
+ self.bnb_quantized = True
return self
|
bitsandbytes.functional/gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
<0> prev_device = pre_call(A.device)
<1> #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
<2> if state is None:
<3> raise ValueError(f'state cannot be None. gemv_4bit() requires the state from quantize_4bit()')
<4>
<5> if A.numel() != A.shape[-1]:
<6> raise ValueError(f'Dimensions of A are invalid. Must be a vector with the leading dimensions of "1", e.g. [1, 1, 2048]')
<7>
<8> Bshape = state.shape
<9> bout = Bshape[0]
<10> absmax = state.absmax
<11> if state.nested:
<12> absmax = dequantize_blockwise(state.absmax, state.state2)
<13> absmax += state.offset
<14>
<15> if out is None:
<16> if len(A.shape) == 3:
<17> out = torch.empty(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
<18> else:
<19> out = torch.empty(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<20>
<21> n = 1
<22> m = Bshape[0]
<23> k = Bshape[1]
<24> lda = Bshape[0]
<25> ldc = Bshape[0]
<26> ldb = (A.shape[-1]+1)//2
<27> is_on_gpu([B, A, out, absmax, state.code])
<28> m = ct.c_int32(m)
<29> n = ct.c_int32(n)
<30> k = ct.c_int32(k)
<31> lda</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Tensor = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 1
ldb = ct.c_int32(ldb)
ldc = ct.c_int32(ldc)
if B.dtype == torch.uint8:
if A.dtype == torch.float16:
lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(state.blocksize))
elif A.dtype == torch.bfloat16:
lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(state.blocksize))
elif A.dtype == torch.float32:
lib.cgemm_4bit_inference_naive_fp32(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(state.blocksize))
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
post_call(prev_device)
return out
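For context, a minimal inference sketch around gemv_4bit (an editor's illustration that mirrors the test entries below, not part of the diff): the kernel expects the QuantState returned by quantize_4bit and an activation that is effectively a single row.
import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

B = torch.randn(4096, 4096, dtype=torch.float16, device='cuda')
qB, state = F.quantize_4bit(B, quant_type='nf4')
x = torch.randn(1, 4096, dtype=torch.float16, device='cuda')  # batch of one token
y = F.gemv_4bit(x, qB.t(), state=state)   # specialized inference kernel
y2 = bnb.matmul_4bit(x, qB.t(), state)    # dispatches to the same kernel for this shape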
===========changed ref 0===========
# module: bitsandbytes.functional
+ def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_storage=torch.uint8):
- def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4', quant_storage)
- return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4')
===========changed ref 1===========
# module: bitsandbytes.functional
+ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_storage=torch.uint8):
- def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
+ return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4', quant_storage)
- return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4')
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class LinearFP4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', quant_storage, device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class LinearNF4(Linear4bit):
+ def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_storage=torch.uint8, device=None):
- def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+ super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', quant_storage, device)
- super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
@classmethod
def from_prequantized(cls, data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool = False, device='cuda', **kwargs) -> "Params4bit":
self = torch.Tensor._make_subclass(cls, data.to(device))
self.requires_grad = requires_grad
self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
self.blocksize = self.quant_state.blocksize
self.compress_statistics = self.quant_state.nested
self.quant_type = self.quant_state.quant_type
+ self.bnb_quantized = True
return self
===========changed ref 5===========
<s> str = 'fp4',
+ quant_storage: torch.dtype = torch.uint8,
+ module: Optional["Linear4bit"] = None,
+ bnb_quantized: bool = False
+ ) -> "Params4bit":
- def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad=True, quant_state: QuantState = None, blocksize: int = 64, compress_statistics: bool = True, quant_type: str = 'fp4') -> "Params4bit":
if data is None:
data = torch.empty(0)
self = torch.Tensor._make_subclass(cls, data, requires_grad)
self.blocksize = blocksize
self.compress_statistics = compress_statistics
self.quant_type = quant_type
self.quant_state = quant_state
+ self.quant_storage = quant_storage
+ self.bnb_quantized = bnb_quantized
self.data = data
+ self.module = module
return self
===========changed ref 6===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
- def cuda(self, device):
- w = self.data.contiguous().half().cuda(device)
- w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
- self.data = w_4bit
- self.quant_state = quant_state
+ return self.to(device='cuda' if device is None else device, non_blocking=non_blocking)
- return self
-
===========changed ref 7===========
# module: bitsandbytes.nn.modules
class Params4bit(torch.nn.Parameter):
+ def _quantize(self, device):
+ w = self.data.contiguous().cuda(device)
+ w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics,
+ quant_type=self.quant_type, quant_storage=self.quant_storage)
+ self.data = w_4bit
+ self.quant_state = quant_state
+ if self.module is not None:
+ self.module.quant_state = quant_state
+ self.bnb_quantized = True
+ return self
+
|
|
tests.test_linear4bit/test_linear_serialization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<17>:<add> new_weight = bnb.nn.Params4bit(data=linear.weight, quant_type=quant_type, requires_grad=False)
<del> new_weight = bnb.nn.Params4bit(data=linear.weight, requires_grad=False)
|
<s>, compress_statistics, bias",
+ list(product(["nf4", "fp4"], [False, True], [False, True], ['uint8', 'float16', 'bfloat16', 'float32'])),
- list(product(["nf4", "fp4"], [False, True], [False, True])),
)
+ def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage):
- def test_linear_serialization(quant_type, compress_statistics, bias):
<0> original_dtype = torch.float16
<1> compute_dtype = None
<2> device = "cuda"
<3> layer_shape = (300, 400)
<4>
<5> linear = torch.nn.Linear(*layer_shape, dtype=original_dtype, device="cpu") # original layer
<6>
<7> # Quantizing original layer
<8> linear_q = bnb.nn.Linear4bit(
<9> linear.in_features,
<10> linear.out_features,
<11> bias=bias,
<12> compute_dtype=compute_dtype,
<13> compress_statistics=compress_statistics,
<14> quant_type=quant_type,
<15> device="meta",
<16> )
<17> new_weight = bnb.nn.Params4bit(data=linear.weight, requires_grad=False)
<18> linear_q.weight = new_weight
<19> if bias:
<20> linear_q.bias = torch.nn.Parameter(linear.bias)
<21> linear_q = linear_q.to(device)
<22>
<23> # saving to state_dict:
<24> sd = linear_q.state_dict()
<25>
<26> # restoring from state_dict:
<27> bias_data2 = sd.pop("bias", None)
<28> weight_data2 = sd.pop("weight")
<29> weight2 = bnb.nn.Params4bit.from_prequantized(quantized_stats=sd, data=weight_data2)
<30>
<31> # creating new layer with same params:
<32> linear_q2 = bnb.nn.Linear4bit(
<33> linear</s>
|
===========below chunk 0===========
<s> bias",
+ list(product(["nf4", "fp4"], [False, True], [False, True], ['uint8', 'float16', 'bfloat16', 'float32'])),
- list(product(["nf4", "fp4"], [False, True], [False, True])),
)
+ def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage):
- def test_linear_serialization(quant_type, compress_statistics, bias):
# offset: 1
linear.out_features,
bias=bias,
compute_dtype=compute_dtype,
compress_statistics=compress_statistics,
quant_type=quant_type,
device="meta",
)
# loading weights from state_dict:
linear_q2.weight = weight2
if bias:
linear_q2.bias = torch.nn.Parameter(bias_data2)
linear_q2 = linear_q2.to(device)
# MATCHING
a, b = linear_q.weight, linear_q2.weight
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
q0 = a.quant_state
q1 = b.quant_state
for attr in ('code', 'dtype', 'blocksize', 'absmax'):
c, d = getattr(q0, attr), getattr(q1, attr)
if isinstance(c, torch.Tensor):
assert torch.equal(c, d)
else:
assert c == d, f"{c} != {d}"
if q0.state2 is not None:
for attr in ('code', 'dtype', 'blocksize', 'absmax'):
c, d = getattr(q0.state2, attr), getattr(q1.state2, attr)
if isinstance(c, torch.Tensor):
assert torch.equal(c, d)
else:
assert c == d, f"{c} != {d}"
if bias:
a, b</s>
===========below chunk 1===========
<s> bias",
+ list(product(["nf4", "fp4"], [False, True], [False, True], ['uint8', 'float16', 'bfloat16', 'float32'])),
- list(product(["nf4", "fp4"], [False, True], [False, True])),
)
+ def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage):
- def test_linear_serialization(quant_type, compress_statistics, bias):
# offset: 2
<s>
else:
assert c == d, f"{c} != {d}"
if bias:
a, b = linear_q.bias, linear_q2.bias
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
# Forward test
x = torch.rand(42, layer_shape[0], device=device)
a = linear_q(x)
b = linear_q2(x)
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
# Saved size ratio test. Target set for layer_shape == (300, 400) w/ bias
with TemporaryDirectory() as tmpdir:
state_path_4bit = os.path.join(tmpdir, "state_4bit.pth")
state_path = os.path.join(tmpdir, "state.pth")
torch.save(linear.state_dict(), state_path)
torch.save(linear_q.state_dict(), state_path_4bit)
size_orig, size_4 = os.path.getsize(state_path), os.path.getsize(
state_path_4bit
)
size_ratio = size_4 / size_orig
target_compression = 0.143 if original_dtype == torch.float32 else 0.29 # these numbers get lower</s>
===========below chunk 2===========
<s> bias",
+ list(product(["nf4", "fp4"], [False, True], [False, True], ['uint8', 'float16', 'bfloat16', 'float32'])),
- list(product(["nf4", "fp4"], [False, True], [False, True])),
)
+ def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage):
- def test_linear_serialization(quant_type, compress_statistics, bias):
# offset: 3
<s> shape increases
ratio_error_msg = f"quantized_size {size_4:,} is larger on disk than {target_compression:.2%} of original size {size_orig:,}"
assert size_ratio < target_compression, ratio_error_msg
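Condensing the round trip exercised above into a disk-backed sketch (names such as linear, bias and quant_type are reused from this test; the file name is hypothetical):
torch.save(linear_q.state_dict(), "linear_nf4.pth")
sd = torch.load("linear_nf4.pth")
bias_data = sd.pop("bias", None)
weight = bnb.nn.Params4bit.from_prequantized(data=sd.pop("weight"), quantized_stats=sd, device="cuda")
restored = bnb.nn.Linear4bit(linear.in_features, linear.out_features, bias=bias,
                             quant_type=quant_type, device="meta")
restored.weight = weight
if bias:
    restored.bias = torch.nn.Parameter(bias_data)
restored = restored.to("cuda")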
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.nn.modules
Params4bit(data: Tensor=..., requires_grad: builtins.bool=...)
Linear4bit(input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None)
at: bitsandbytes.nn.modules.Linear4bit.__init__
self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
at: bitsandbytes.nn.modules.Params4bit
from_prequantized(data: torch.Tensor, quantized_stats: Dict[str, Any], requires_grad: bool=False, device='cuda', **kwargs) -> "Params4bit"
at: bitsandbytes.nn.modules.Params4bit.cuda
self.quant_state = quant_state
===========unchanged ref 1===========
at: itertools
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5], iter6: Iterable[_T6]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5, _T6]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3]) -> Iterator[Tuple[_T1, _T2, _T3]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4]) -> Iterator[Tuple[_T1, _T2, _T3, _T4]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2]) -> Iterator[Tuple[_T1, _T2]]
product(*iterables: Iterable[_T1], repeat: int) -> Iterator[Tuple[_T1, ...]]
product(iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]]
product(*iterables: Iterable[Any], repeat: int=...) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[Any], iter2: Iterable[Any], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], iter7: Iterable[Any], *iterables: Iterable[Any]) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5]]
|
tests.test_functional/test_gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
dcfb6f81433e37a8546f7dab3f648eaf858b29ff
|
Initial FSDP Support for QLoRA Finetuning (#970)
|
<s> torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ @pytest.mark.parametrize("quant_storage", [torch.uint8, torch.float16, torch.bfloat16, torch.float32], ids=['uint8', 'fp16', 'bf16', 'fp32'])
+ def test_gemv_4bit(dtype, storage_type, quant_storage, double_quant, kind):
- def test_gemv_4bit(dtype, storage_type, double_quant, kind):
<0> for dim in [128, 256, 512, 1024]:
<1> #for dim in [4*1024]:
<2> #for dim in [1*16]:
<3> errs1 = []
<4> errs2 = []
<5> errs3 = []
<6> relerrs1 = []
<7> relerrs2 = []
<8> relerrs3 = []
<9> max_errs1 = []
<10> max_errs2 = []
<11> max_errs3 = []
<12>
<13>
<14> for i in range(100):
<15> if kind == 'fc1':
<16> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<17> B = torch.randn(dim*4, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<18> elif kind == 'fc2':
<19> A = torch.randn(1, 4*dim, dtype=dtype, device='cuda')
<20> B = torch.randn(dim, 4*dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<21> elif kind == 'attn':
<22> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<23> B = torch.randn(dim, dim, dtype=dtype, device='cuda')/math.sqrt(dim)
<24> elif kind == 'attn_packed':
<25> A = torch.randn(1, dim, dtype=dtype, device='cuda')
<26> B = torch.randn(dim*3, dim, dtype=dtype, device</s>
|
===========below chunk 0===========
<s> ids=['fp16', 'bf16', 'fp32'])
+ @pytest.mark.parametrize("quant_storage", [torch.uint8, torch.float16, torch.bfloat16, torch.float32], ids=['uint8', 'fp16', 'bf16', 'fp32'])
+ def test_gemv_4bit(dtype, storage_type, quant_storage, double_quant, kind):
- def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 1
qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=double_quant)
C3 = torch.matmul(A, B.t())
C2 = F.gemv_4bit(A, qB.t(), state=state)
A.requires_grad = True
C1 = bnb.matmul_4bit(A, qB.t(), state)
err1 = (C1-C2).abs().float()
err2 = (C3-C2).abs().float()
err3 = (C3-C1).abs().float()
mag1 = torch.abs(C1).float()+1e-5
mag2 = torch.abs(C3).float()+1e-5
mag3 = torch.abs(C3).float()+1e-5
relerr1 = err1/mag1
relerr2 = err2/mag2
relerr3 = err3/mag3
max_err1 = err1.max()
max_err2 = err2.max()
max_err3 = err3.max()
errs1.append(err1.mean().item())
errs2.append(err2.mean().item())
errs3.append(err3.mean().item())
relerrs1.append(relerr1.mean().item())
relerrs2.append(relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_</s>
===========below chunk 1===========
<s> ids=['fp16', 'bf16', 'fp32'])
+ @pytest.mark.parametrize("quant_storage", [torch.uint8, torch.float16, torch.bfloat16, torch.float32], ids=['uint8', 'fp16', 'bf16', 'fp32'])
+ def test_gemv_4bit(dtype, storage_type, quant_storage, double_quant, kind):
- def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 2
<s>relerr2.mean().item())
relerrs3.append(relerr3.mean().item())
max_errs1.append(max_err1.item())
max_errs2.append(max_err2.item())
max_errs3.append(max_err3.item())
c = int(C1.numel()*0.0014*(dim/256))+1
c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=c, throw=False)
err1 = sum(errs1)/len(errs1)/math.sqrt(dim)
err2 = sum(errs2)/len(errs2)/math.sqrt(dim)
err3 = sum(errs3)/len(errs3)/math.sqrt(dim)
relerr1 = sum(relerrs1)/len(relerrs1)/math.sqrt(dim)
relerr2 = sum(relerrs2)/len(relerrs2)/math.sqrt(dim)
relerr3 = sum(relerrs3)/len(relerrs3)/math.sqrt(dim)
maxerr1 = sum(max_errs1)/len(max_errs1)/math.sqrt(dim)
maxerr2 = sum(max_errs2)/len(max_errs2)/math.sqrt(dim)
maxerr3 = sum(max</s>
===========below chunk 2===========
<s> ids=['fp16', 'bf16', 'fp32'])
+ @pytest.mark.parametrize("quant_storage", [torch.uint8, torch.float16, torch.bfloat16, torch.float32], ids=['uint8', 'fp16', 'bf16', 'fp32'])
+ def test_gemv_4bit(dtype, storage_type, quant_storage, double_quant, kind):
- def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 3
<s>s3)/len(max_errs3)/math.sqrt(dim)
absratio = err2/err3
relratio = relerr2/relerr3
maxratio = relerr2/relerr3
# for debugging if the test fails
#
#print('='*80)
#print(f'For matmul: {A.shape}, {B.shape}, {kind}, {dtype}, {storage_type}, double_quant={double_quant}:')
#print(C1.flatten()[-20:])
#print(C2.flatten()[-20:])
#print(f'inference vs training abs: {err1}')
#print(f'inference vs training rel: {relerr1}')
#print(f'inference vs training max: {maxerr1}')
#print(f'inference vs training vs torch err ratio abs: {absratio}')
#print(f'inference vs training vs torch err ratio rel: {relratio}')
#print(f'inference vs training vs torch err ratio max: {maxratio}')
if dtype == torch.float16:
if dim <= 512:
assert err1 < 7e-5
assert relerr1 < 0.0008
else:
assert err1 < 6e-5
assert relerr1 < 2e-4
assert absratio < 1.005 and absratio > 0.995
assert relratio <</s>
===========below chunk 3===========
<s> ids=['fp16', 'bf16', 'fp32'])
+ @pytest.mark.parametrize("quant_storage", [torch.uint8, torch.float16, torch.bfloat16, torch.float32], ids=['uint8', 'fp16', 'bf16', 'fp32'])
+ def test_gemv_4bit(dtype, storage_type, quant_storage, double_quant, kind):
- def test_gemv_4bit(dtype, storage_type, double_quant, kind):
# offset: 4
<s>005 and relratio > 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.float32:
if dim <= 512:
assert err1 < 5e-8
assert relerr1 < 1e-6
assert maxerr1 < 1e-7
else:
assert err1 < 5e-8
assert relerr1 < 8e-6
assert maxerr1 < 1e-7
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.005 and relratio > 0.995
assert maxratio < 1.005 and maxratio > 0.995
elif dtype == torch.bfloat16:
if dim <= 512:
assert err1 < 6e-4
assert relerr1 < 0.007
assert maxerr1 < 0.015
else:
assert err1 < 2e-4
assert relerr1 < 0.002
assert maxerr1 < 0.0012
assert absratio < 1.005 and absratio > 0.995
assert relratio < 1.04 and relratio > 0.96
assert maxratio < 1.02 and maxratio > 0.98
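For readers skimming the thresholds above, the per-iteration error statistics reduce to the following computation (a condensed sketch of what the loop already does, using the A, B, qB and state from that loop):
C_ref = torch.matmul(A, B.t())               # fp16/bf16/fp32 reference result
C_inf = F.gemv_4bit(A, qB.t(), state=state)  # inference kernel under test
abs_err = (C_inf - C_ref).abs().float().mean()
rel_err = ((C_inf - C_ref).abs().float() / (C_ref.abs().float() + 1e-5)).mean()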
===========changed ref 0===========
# module: tests.test_linear4bit
+ storage = {
+ 'uint8': torch.uint8,
+ 'float16': torch.float16,
+ 'bfloat16': torch.bfloat16,
+ 'float32': torch.float32
+ }
|
|
bitsandbytes.cuda_setup.main/CUDASetup.generate_instructions
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e69e8fc0cba9b069719367a4d3cb05e07b1a0b0
|
Bump CUDA 12.2.0 to 12.2.1, fix setup support for Cuda 12.1 (#703), Sort compute capabilities sets to select max
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
<0> if getattr(self, 'error', False): return
<1> print(self.error)
<2> self.error = True
<3> if not self.cuda_available:
<4> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.')
<5> self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
<6> self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
<7> self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
<8> self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
<9> self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
<10> self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so* and follow with step (2b)')
<11> return
<12>
<13> if self.cudart_path is None:
<14> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
<15> self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
<16> self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://raw.githubusercontent.com/TimDettmers/bitsandbytes/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
make_cmd += ' make cuda11x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_</s>
===========below chunk 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s>CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone https://github.com/TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
===========unchanged ref 0===========
at: bitsandbytes.cuda_setup.main
is_cublasLt_compatible(cc)
at: bitsandbytes.cuda_setup.main.CUDASetup
_instance = None
add_log_entry(msg, is_warning=False)
at: bitsandbytes.cuda_setup.main.CUDASetup.initialize
self.error = False
at: bitsandbytes.cuda_setup.main.CUDASetup.run_cuda_setup
self.cudart_path = cudart_path
self.cuda_available = torch.cuda.is_available()
self.cc = cc
self.cuda_version_string = cuda_version_string
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
# these are the most common libs names
# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
# we have libcudart.so.11.0 which causes a lot of errors before
# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
- CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
# this is an ordered list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
backup_paths = []
backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
|
|
bitsandbytes.cuda_setup.main/get_compute_capabilities
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3e69e8fc0cba9b069719367a4d3cb05e07b1a0b0
|
Bump CUDA 12.2.0 to 12.2.1, fix setup support for Cuda 12.1 (#703), Sort compute capabilities sets to select max
|
<5>:<add> ccs.sort(key=lambda v: tuple(map(int, str(v).split("."))))
<add>
|
# module: bitsandbytes.cuda_setup.main
def get_compute_capabilities():
<0> ccs = []
<1> for i in range(torch.cuda.device_count()):
<2> cc_major, cc_minor = torch.cuda.get_device_capability(torch.cuda.device(i))
<3> ccs.append(f"{cc_major}.{cc_minor}")
<4>
<5> return ccs
<6>
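The numeric sort key added by this commit matters because plain string sorting is lexicographic; a small illustration with made-up capability strings:
ccs = ["9.0", "10.0", "8.6"]
print(sorted(ccs))                                               # ['10.0', '8.6', '9.0'] -> wrong max
print(sorted(ccs, key=lambda v: tuple(map(int, v.split("."))))) # ['8.6', '9.0', '10.0']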
|
===========unchanged ref 0===========
at: bitsandbytes.cuda_setup.main.get_cuda_version
major, minor = map(int, torch.version.cuda.split("."))
major, minor = map(int, torch.version.cuda.split("."))
at: torch.cuda
device(device: Any)
get_device_capability(device: Optional[_device_t]=None) -> Tuple[int, int]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
# these are the most common libs names
# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
# we have libcudart.so.11.0 which causes a lot of errors before
# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
- CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
# this is an ordered list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
backup_paths = []
backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
if getattr(self, 'error', False): return
print(self.error)
self.error = True
if not self.cuda_available:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.')
self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so* and follow with step (2b)')
return
if self.cudart_path is None:
self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev</s>
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 1
<s>('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://raw.githubusercontent.com/TimDettmers/bitsandbytes/main/cuda_install.sh')
self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
return
make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
if len(self.cuda_version_string) < 3:
make_cmd += ' make cuda92'
elif self.cuda_version_string == '110':
make_cmd += ' make cuda110'
elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:</s>
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def generate_instructions(self):
# offset: 2
<s> make_cmd += ' make cuda11x'
+ elif self.cuda_version_string[:2] == '12' and 1 >= int(self.cuda_version_string[2]) >= 0:
+ make_cmd += ' make cuda12x'
elif self.cuda_version_string == '100':
self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
return
has_cublaslt = is_cublasLt_compatible(self.cc)
if not has_cublaslt:
make_cmd += '_nomatmul'
self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
self.add_log_entry('git clone https://github.com/TimDettmers/bitsandbytes.git')
self.add_log_entry('cd bitsandbytes')
self.add_log_entry(make_cmd)
self.add_log_entry('python setup.py install')
|
bitsandbytes.__main__/find_file_recursive
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3cefd82d9ce99793eae278933c84bfaba67652ad
|
Quote folder and filename in find_file_recursive (#975)
|
<0>:<add> folder = shlex.quote(folder)
<add> filename = shlex.quote(filename)
|
# module: bitsandbytes.__main__
def find_file_recursive(folder, filename):
<0> cmd = f'find {folder} -name {filename}'
<1> out, err = execute_and_return(cmd)
<2> if len(err) > 0:
<3> raise RuntimeError('Something went wrong while trying to find the file. Maybe you do not have a Linux system?')
<4>
<5> return out
<6>
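The quoting added by this commit guards against paths containing spaces or shell metacharacters; a brief sketch with a hypothetical folder name:
import shlex

folder = "/home/user/My Documents"  # hypothetical path with a space
print(f"find {folder} -name libcudart.so*")                                 # the shell would split the path
print(f"find {shlex.quote(folder)} -name {shlex.quote('libcudart.so*')}")   # arguments stay intact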
|
===========unchanged ref 0===========
at: bitsandbytes.__main__
execute_and_return(command_string: str) -> Tuple[str, str]
at: shlex
quote(s: str) -> str
|
bitsandbytes.cuda_setup.env_vars/to_be_ignored
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
407a8d303f7f6a09175ce82758421bfe65aa57ac
|
Update env_vars.py (#951)
|
<5>:<add> "GOOGLE_VM_CONFIG_LOCK_FILE", # GCP: requires elevated permissions, causing problems in VMs and Jupyter notebooks
<del> "GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
<17>:<del> "GOOGLE_VM_CONFIG_LOCK_FILE", # Google Cloud stuff, contains root only paths
|
# module: bitsandbytes.cuda_setup.env_vars
def to_be_ignored(env_var: str, value: str) -> bool:
<0> ignorable = {
<1> "PWD", # PWD: this is how the shell keeps track of the current working dir
<2> "OLDPWD",
<3> "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
<4> "SSH_TTY",
<5> "GOOGLE_VM_CONFIG_LOCK_FILE", # on GCP setups, requires elevated permissions, causing problems in Jupyter notebooks
<6> "HOME", # Linux shell default
<7> "TMUX", # Terminal Multiplexer
<8> "XDG_DATA_DIRS", # XDG: Desktop environment stuff
<9> "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
<10> "XDG_RUNTIME_DIR",
<11> "MAIL", # something related to emails
<12> "SHELL", # binary for currently invoked shell
<13> "DBUS_SESSION_BUS_ADDRESS", # hardware related
<14> "PATH", # this is for finding binaries, not libraries
<15> "LESSOPEN", # related to the `less` command
<16> "LESSCLOSE",
<17> "GOOGLE_VM_CONFIG_LOCK_FILE", # Google Cloud stuff, contains root only paths
<18> "_", # current Python interpreter
<19> }
<20> return env_var in ignorable
<21>
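For reference, this predicate is typically applied as a filter over the process environment (a sketch of the usual call site, not part of the diff):
import os

candidates = {k: v for k, v in os.environ.items() if not to_be_ignored(k, v)}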
| |
tests.test_autograd/test_matmullt
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
f1c75741bfcd41723f96730771d43f51b79b3f42
|
Tests: improve CUDA support detection (#985)
|
<0>:<del> if not torch.cuda.is_available(): pytest.skip('No GPU found.')
|
<s>.mark.parametrize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
<0> if not torch.cuda.is_available(): pytest.skip('No GPU found.')
<1> dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
<2> dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
<3> outlier_dim = torch.randint(0, dimA[1], size=(dimA[1] // 8,), device="cuda")
<4> if has_bias == False:
<5> req_grad = list(req_grad)
<6> req_grad[2] = False
<7>
<8> for i in range(k):
<9>
<10> # normal multiply
<11> if funcs[0] in [torch.mm, torch.matmul]:
<12> A = torch.randn(
<13> size=dimA, device="cuda", requires_grad=req_grad[0], dtype=dtype
<14> )
<15> if decomp == 6.0:
<16> with torch.no_grad():
<17> A[:, outlier_dim] = 6.0
<18> B = torch.randn(
<19> size=dimB, device="cuda", requires_grad=req_grad[1], dtype=dtype
<20> )
<21> target = torch.randn(
<22> size=(dim2, dim4),
<23> device="cuda",
<24> requires_grad=req_grad[1],
<25> dtype=dtype,
<26> )
<27> bias = None
<28> bias2 = None
<29> if has_bias:
<30> bias = torch</s>
|
===========below chunk 0===========
<s>ize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
# offset: 1
bias2 = bias.clone()
torch.nn.init.xavier_uniform_(B)
B2 = B.clone()
state = bnb.MatmulLtState()
state.threshold = decomp
state.has_fp16_weights = has_fp16_weights
if not has_fp16_weights:
if not transpose[0] and not transpose[1]:
B2 = B2.t().contiguous()
(
state.CB,
CBt,
state.SCB,
SCBt,
coo_tensorB,
) = bnb.functional.double_quant(B2.to(torch.float16))
B2 = state.CB
if not transpose[0] and transpose[1]:
out_torch = funcs[0](A, B.t())
out_bnb = funcs[1](A, B2, state=state, bias=bias2)
elif not transpose[0] and not transpose[1]:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B2.t(), state=state, bias=bias2)
if has_bias:
out_torch += bias
assert out_bnb.dtype == A.dtype, f"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}"
n = out_bnb.numel()
err = torch.abs(out_bnb - out_torch).mean().item()
# print</s>
===========below chunk 1===========
<s>ize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
# offset: 2
<s>.numel()
err = torch.abs(out_bnb - out_torch).mean().item()
# print(f'abs error {err:.4f}')
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() <= n * (0.0175 if dtype == torch.float16 else 0.021)
idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)
assert (idx == 0).sum().item() <= n * 0.001
if has_fp16_weights:
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(
out_bnb, target
).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias1 = bias.grad
bias.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
grad</s>
===========below chunk 2===========
<s>ize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
# offset: 3
<s> = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias2 = bias.grad
bias.grad = None
if req_grad[0]:
torch.testing.assert_close(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
if dim2 > 0:
assert torch.abs(gradB1).sum() > 0.0
assert torch.abs(gradB2).sum() > 0.0
else:
assert torch.abs(gradB1).sum() == 0.0
assert torch.abs(gradB2).sum() == 0.0
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.02
torch.testing.assert_close(
gradB1, gradB2, atol=0.18, rtol=0.3
)
if req_grad[2]:
torch.testing.assert_close(
===========changed ref 0===========
+ # module: tests.conftest
+
+
===========changed ref 1===========
+ # module: tests.conftest
+ @pytest.fixture(scope="session")
+ def requires_cuda() -> bool:
+ cuda_available = torch.cuda.is_available()
+ if not cuda_available:
+ pytest.skip("CUDA is required")
+ return cuda_available
+
===========changed ref 2===========
+ # module: tests.conftest
+ def pytest_runtest_call(item):
+ try:
+ item.runtest()
+ except AssertionError as ae:
+ if str(ae) == "Torch not compiled with CUDA enabled":
+ pytest.skip("Torch not compiled with CUDA enabled")
+ raise
+
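A usage sketch for the new fixture (the test name is hypothetical): requesting requires_cuda makes a test skip cleanly on CUDA-less machines instead of erroring out.
def test_needs_gpu(requires_cuda):
    A = torch.randn(8, 8, device="cuda")
    assert A.is_cuda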
|
tests.test_functional/test_nvidia_transform
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
f1c75741bfcd41723f96730771d43f51b79b3f42
|
Tests: improve CUDA support detection (#985)
|
<4>:<add> try:
<add> func = F.get_transform_func(dtype, orderA, orderOut, transpose)
<del> func = F.get_transform_func(dtype, orderA, orderOut, transpose)
<5>:<add> except ValueError as ve:
<add> pytest.skip(str(ve)) # skip if not supported
|
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",values,ids=names)
def test_nvidia_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
<0> if dims == 3 and out_order != "col32":
<1> return
<2> if dtype == torch.int32 and out_order != "col32":
<3> return
<4> func = F.get_transform_func(dtype, orderA, orderOut, transpose)
<5>
<6> if dims == 2:
<7> A = torch.randint(-128, 127, size=(dim1, dim2), device="cuda").to(dtype)
<8> elif dims == 3:
<9> A = torch.randint(-128, 127, size=(dim1, dim2, dim3), device="cuda").to(
<10> dtype
<11> )
<12>
<13> out, S = F.nvidia_transform(A, to_order=orderOut)
<14>
<15> if orderOut == "row":
<16> torch.testing.assert_close(A.flatten(), out.flatten())
<17> elif orderOut == "col":
<18> torch.testing.assert_close(A.t().flatten(), out.flatten())
<19> elif orderOut == "col32":
<20> if dims == 2:
<21> n = A.shape[0] * (A.shape[1] + (32 - (A.shape[1] % 32)))
<22> elif dims == 3:
<23> n = (
<24> A.shape[0]
<25> * A.shape[1]
<26> * (A.shape[2] + (32 - (A.shape[2] % 32)))
<27> )
<28> assert out.numel() == n
<29> elif orderOut == "col_turing":
<30> # 32 col 8 row tiles
<31> n = (A.shape[0] + (8 - A.shape[0] % 8)) * (
<32> A.shape[1] + (32 - (</s>
|
===========below chunk 0===========
# module: tests.test_functional
@pytest.mark.parametrize("dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",values,ids=names)
def test_nvidia_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
# offset: 1
)
assert out.numel() == n
total_coltile = (A.shape[1] // 32) + (1 if A.shape[1] % 32 != 0 else 0)
for row in range(A.shape[0]):
for col in range(A.shape[1]):
i = row * A.shape[1]
j = col
coltile = (col // 32) + (1 if col % 32 != 0 else 0)
rowtile = (
(row // 8) + (1 if row % 8 != 0 else 0)
) * total_coltile
offset = 32 * 8 * (rowtile + coltile)
col2 = col % 32
row2 = (row % 8) * 32
assert A.flatten()[i + j] == A[row, col]
# assert A.flatten()[i+j] == out.flatten()[row2+col2]
# torch.testing.assert_close(A.flatten()[i+j], A[row, col])
# torch.testing.assert_close(A.flatten()[i+j], out.flatten()[row2+ col2+block_offset])
if orderOut == "col32":
out2, S = F.nvidia_transform(
out, from_order=orderOut, to_order="row", state=S
)
torch.testing.assert_close(A, out2)
===========changed ref 0===========
+ # module: tests.conftest
+
+
===========changed ref 1===========
+ # module: tests.conftest
+ @pytest.fixture(scope="session")
+ def requires_cuda() -> bool:
+ cuda_available = torch.cuda.is_available()
+ if not cuda_available:
+ pytest.skip("CUDA is required")
+ return cuda_available
+
===========changed ref 2===========
+ # module: tests.conftest
+ def pytest_runtest_call(item):
+ try:
+ item.runtest()
+ except AssertionError as ae:
+ if str(ae) == "Torch not compiled with CUDA enabled":
+ pytest.skip("Torch not compiled with CUDA enabled")
+ raise
+
===========changed ref 3===========
<s>.mark.parametrize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
- if not torch.cuda.is_available(): pytest.skip('No GPU found.')
dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
outlier_dim = torch.randint(0, dimA[1], size=(dimA[1] // 8,), device="cuda")
if has_bias == False:
req_grad = list(req_grad)
req_grad[2] = False
for i in range(k):
# normal multiply
if funcs[0] in [torch.mm, torch.matmul]:
A = torch.randn(
size=dimA, device="cuda", requires_grad=req_grad[0], dtype=dtype
)
if decomp == 6.0:
with torch.no_grad():
A[:, outlier_dim] = 6.0
B = torch.randn(
size=dimB, device="cuda", requires_grad=req_grad[1], dtype=dtype
)
target = torch.randn(
size=(dim2, dim4),
device="cuda",
requires_grad=req_grad[1],
dtype=dtype,
)
bias = None
bias2 = None
if has_bias:
bias = torch.randn(dim4, device='cuda', dtype=dtype, requires_grad=req_grad[2])
bias2</s>
===========changed ref 4===========
<s>ize(
"dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
values,
ids=names,
)
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
# offset: 1
<s>.randn(dim4, device='cuda', dtype=dtype, requires_grad=req_grad[2])
bias2 = bias.clone()
torch.nn.init.xavier_uniform_(B)
B2 = B.clone()
state = bnb.MatmulLtState()
state.threshold = decomp
state.has_fp16_weights = has_fp16_weights
if not has_fp16_weights:
if not transpose[0] and not transpose[1]:
B2 = B2.t().contiguous()
(
state.CB,
CBt,
state.SCB,
SCBt,
coo_tensorB,
) = bnb.functional.double_quant(B2.to(torch.float16))
B2 = state.CB
if not transpose[0] and transpose[1]:
out_torch = funcs[0](A, B.t())
out_bnb = funcs[1](A, B2, state=state, bias=bias2)
elif not transpose[0] and not transpose[1]:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B2.t(), state=state, bias=bias2)
if has_bias:
out_torch += bias
assert out_bnb.dtype == A.dtype, f"b</s>
|
bitsandbytes.cuda_setup.env_vars/might_contain_a_path
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
e651e8ed61b8fe8dcc3322c34ea3d53c6b323021
|
fix library detection, enable Windows (#873)
|
<0>:<add> return os.sep in candidate
<del> return "/" in candidate
|
# module: bitsandbytes.cuda_setup.env_vars
def might_contain_a_path(candidate: str) -> bool:
<0> return "/" in candidate
<1>
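A quick illustration of why checking os.sep instead of a hard-coded "/" matters across platforms (the example paths are assumptions, not part of the diff):

import os

candidate = r"C:\tools\cuda\bin" if os.name == "nt" else "/usr/local/cuda/bin"
print(os.sep in candidate)   # True on both Windows and Linux
print("/" in candidate)      # False for the backslash-style Windows path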
| |
bitsandbytes.cuda_setup.main/CUDASetup.manual_override
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
e651e8ed61b8fe8dcc3322c34ea3d53c6b323021
|
fix library detection, enable Windows (#873)
|
<11>:<add> binary_name = self.binary_name.rsplit(".", 1)[0]
<add> suffix = ".so" if os.name != "nt" else ".dll"
<add> self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
<del> self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def manual_override(self):
<0> if torch.cuda.is_available():
<1> if 'BNB_CUDA_VERSION' in os.environ:
<2> if len(os.environ['BNB_CUDA_VERSION']) > 0:
<3> warn((f'\n\n{"="*80}\n'
<4> 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
<5> 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
<6> 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
<7> 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
<8> 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
<9> f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
<10> f'\n{"="*80}\n\n'))
<11> self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
<12>
|
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: bitsandbytes.cuda_setup.main.CUDASetup
_instance = None
at: bitsandbytes.cuda_setup.main.CUDASetup.generate_instructions
self.error = True
at: bitsandbytes.cuda_setup.main.CUDASetup.run_cuda_setup
self.initialized = True
self.lib = ct.cdll.LoadLibrary(str(binary_path))
at: os
environ = _createenviron()
at: torch.cuda
is_available() -> bool
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
# these are the most common libs names
# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
# we have libcudart.so.11.0 which causes a lot of errors before
# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ system = platform.system()
+ if system == 'Windows':
+ CUDA_RUNTIME_LIBS: list = ["nvcuda.dll"]
+ else: # Linux or other
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
- CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
# this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
backup_paths = []
backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.env_vars
def might_contain_a_path(candidate: str) -> bool:
+ return os.sep in candidate
- return "/" in candidate
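Returning to the BNB_CUDA_VERSION override in this record, a small sketch of the renaming idea (the library name and version value are assumptions for illustration, not the exact library code):

import os

os.environ["BNB_CUDA_VERSION"] = "122"        # e.g. force the CUDA 12.2 binary
binary_name = "libbitsandbytes_cuda117.so"    # name derived from the detected PyTorch CUDA version
stem, ext = binary_name.rsplit(".", 1)        # "libbitsandbytes_cuda117", "so"
stem = stem[:-3] + os.environ["BNB_CUDA_VERSION"]
print(f"{stem}.{ext}")                        # libbitsandbytes_cuda122.so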
|
bitsandbytes.cuda_setup.main/CUDASetup.run_cuda_setup
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
e651e8ed61b8fe8dcc3322c34ea3d53c6b323021
|
fix library detection, enable Windows (#873)
|
<14>:<add> suffix = ".so" if os.name != "nt" else ".dll"
<17>:<add> legacy_binary_name = f"libbitsandbytes_cpu{suffix}"
<del> legacy_binary_name = "libbitsandbytes_cpu.so"
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
<0> self.initialized = True
<1> self.cuda_setup_log = []
<2>
<3> binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
<4> self.cudart_path = cudart_path
<5> self.cuda_available = torch.cuda.is_available()
<6> self.cc = cc
<7> self.cuda_version_string = cuda_version_string
<8> self.binary_name = binary_name
<9> self.manual_override()
<10>
<11> package_dir = Path(__file__).parent.parent
<12> binary_path = package_dir / self.binary_name
<13>
<14> try:
<15> if not binary_path.exists():
<16> self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
<17> legacy_binary_name = "libbitsandbytes_cpu.so"
<18> self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
<19> binary_path = package_dir / legacy_binary_name
<20> if not binary_path.exists() or torch.cuda.is_available():
<21> self.add_log_entry('')
<22> self.add_log_entry('='*48 + 'ERROR' + '='*37)
<23> self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
<24> self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
<25> '"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
<26> self.add_log_entry('2. CUDA driver not installed')
<27> self.add_log_entry('3. CUDA not installed')
<28> self.</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
self.add_log_entry('5. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(binary_path)
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
===========unchanged ref 0===========
at: bitsandbytes.cuda_setup.main
evaluate_cuda_setup()
at: bitsandbytes.cuda_setup.main.CUDASetup
manual_override()
manual_override(self)
add_log_entry(msg, is_warning=False)
at: bitsandbytes.cuda_setup.main.CUDASetup.initialize
self.initialized = False
at: os
name = 'posix'
name = 'nt'
environ = _createenviron()
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
exists() -> bool
at: pathlib.PurePath
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
at: torch.cuda
is_available() -> bool
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def manual_override(self):
if torch.cuda.is_available():
if 'BNB_CUDA_VERSION' in os.environ:
if len(os.environ['BNB_CUDA_VERSION']) > 0:
warn((f'\n\n{"="*80}\n'
'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
f'\n{"="*80}\n\n'))
+ binary_name = self.binary_name.rsplit(".", 1)[0]
+ suffix = ".so" if os.name != "nt" else ".dll"
+ self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
- self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
# these are the most common libs names
# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
# we have libcudart.so.11.0 which causes a lot of errors before
# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ system = platform.system()
+ if system == 'Windows':
+ CUDA_RUNTIME_LIBS: list = ["nvcuda.dll"]
+ else: # Linux or other
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
- CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
# this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
backup_paths = []
backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.env_vars
def might_contain_a_path(candidate: str) -> bool:
+ return os.sep in candidate
- return "/" in candidate
|
bitsandbytes.cuda_setup.main/extract_candidate_paths
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
e651e8ed61b8fe8dcc3322c34ea3d53c6b323021
|
fix library detection, enable Windows (#873)
|
<0>:<add> return {Path(ld_path) for ld_path in paths_list_candidate.split(os.pathsep) if ld_path}
<del> return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
|
# module: bitsandbytes.cuda_setup.main
def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
<0> return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
<1>
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def manual_override(self):
if torch.cuda.is_available():
if 'BNB_CUDA_VERSION' in os.environ:
if len(os.environ['BNB_CUDA_VERSION']) > 0:
warn((f'\n\n{"="*80}\n'
'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
f'\n{"="*80}\n\n'))
+ binary_name = self.binary_name.rsplit(".", 1)[0]
+ suffix = ".so" if os.name != "nt" else ".dll"
+ self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
- self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
# these are the most common libs names
# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
# we have libcudart.so.11.0 which causes a lot of errors before
# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ system = platform.system()
+ if system == 'Windows':
+ CUDA_RUNTIME_LIBS: list = ["nvcuda.dll"]
+ else: # Linux or other
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
- CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
# this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
backup_paths = []
backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
self.cuda_available = torch.cuda.is_available()
self.cc = cc
self.cuda_version_string = cuda_version_string
self.binary_name = binary_name
self.manual_override()
package_dir = Path(__file__).parent.parent
binary_path = package_dir / self.binary_name
+ suffix = ".so" if os.name != "nt" else ".dll"
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
+ legacy_binary_name = f"libbitsandbytes_cpu{suffix}"
- legacy_binary_name = "libbitsandbytes_cpu.so"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
'"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
self.add_log_entry('2. CUDA driver not installed')
self.add_log_entry</s>
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
<s>.md')
self.add_log_entry('2. CUDA driver not installed')
self.add_log_entry('3. CUDA not installed')
self.add_log_entry('4. You have multiple conflicting CUDA libraries')
self.add_log_entry('5. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
raise Exception('CUDA SETUP: Setup Failed!')
+ self.lib = ct.cdll.LoadLibrary(str(binary_path))
- self.lib = ct.cdll.LoadLibrary(binary_path)
else:
+ self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path!s}...")
- self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
+ self.lib = ct.cdll.LoadLibrary(str(binary_path))
- self.lib = ct.cdll.LoadLibrary(binary_path)
except Exception as ex:
self.add_log_entry(str(ex))
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.env_vars
def might_contain_a_path(candidate: str) -> bool:
+ return os.sep in candidate
- return "/" in candidate
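The extract_candidate_paths change above swaps a hard-coded ':' for os.pathsep; a minimal sketch of the resulting behaviour (the directories are placeholders):

import os
from pathlib import Path

raw = os.pathsep.join(["/usr/local/cuda/lib64", "/opt/conda/lib"])   # ';' on Windows, ':' elsewhere
candidates = {Path(p) for p in raw.split(os.pathsep) if p}
print(sorted(str(p) for p in candidates))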
|
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
e651e8ed61b8fe8dcc3322c34ea3d53c6b323021
|
fix library detection, enable Windows (#873)
|
<1>:<add> suffix = ".so" if os.name != "nt" else ".dll"
<7>:<add> if not torch.cuda.is_available(): return f'libbitsandbytes_cpu{suffix}', None, None, None
<del> if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None
|
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
<0> cuda_setup = CUDASetup.get_instance()
<1> if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
<2> cuda_setup.add_log_entry('')
<3> cuda_setup.add_log_entry('='*35 + 'BUG REPORT' + '='*35)
<4> cuda_setup.add_log_entry(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
<5> ('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
<6> cuda_setup.add_log_entry('='*80)
<7> if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None
<8>
<9> cudart_path = determine_cuda_runtime_lib_path()
<10> ccs = get_compute_capabilities()
<11> ccs.sort()
<12> cc = ccs[-1] # we take the highest capability
<13> cuda_version_string = get_cuda_version()
<14>
<15> cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found: CUDA_VERSION={cuda_version_string}, Highest Compute Capability: {cc}.")
<16> cuda_setup.add_log_entry(f"CUDA SETUP: To manually override the PyTorch CUDA version please see:"
<17> "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md")
<18>
<19>
<20> # 7.5 is the minimum CC for cublasLt
<21> has_cublaslt = is_cublasLt_compatible(cc)
<22>
<23> # TODO:
<24> # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
# offset: 1
# we use ls -l instead of nvcc to determine the cuda version
# since most installations will have the libcudart.so installed, but not the compiler
if has_cublaslt:
binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so"
else:
"if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"
return binary_name, cudart_path, cc, cuda_version_string
===========unchanged ref 0===========
at: bitsandbytes.cuda_setup.main
CUDASetup()
is_cublasLt_compatible(cc)
determine_cuda_runtime_lib_path() -> Union[Path, None]
get_cuda_version()
get_compute_capabilities()
at: bitsandbytes.cuda_setup.main.CUDASetup
get_instance()
at: bitsandbytes.cuda_setup.main.get_compute_capabilities
ccs = []
at: os
name = 'posix'
name = 'nt'
environ = _createenviron()
at: torch.cuda
is_available() -> bool
device(device: Any)
get_device_capability(device: Optional[_device_t]=None) -> Tuple[int, int]
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
+ return {Path(ld_path) for ld_path in paths_list_candidate.split(os.pathsep) if ld_path}
- return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def manual_override(self):
if torch.cuda.is_available():
if 'BNB_CUDA_VERSION' in os.environ:
if len(os.environ['BNB_CUDA_VERSION']) > 0:
warn((f'\n\n{"="*80}\n'
'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
f'\n{"="*80}\n\n'))
+ binary_name = self.binary_name.rsplit(".", 1)[0]
+ suffix = ".so" if os.name != "nt" else ".dll"
+ self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
- self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
# these are the most common libs names
# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
# we have libcudart.so.11.0 which causes a lot of errors before
# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ system = platform.system()
+ if system == 'Windows':
+ CUDA_RUNTIME_LIBS: list = ["nvcuda.dll"]
+ else: # Linux or other
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
- CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
# this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
backup_paths = []
backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
self.cuda_available = torch.cuda.is_available()
self.cc = cc
self.cuda_version_string = cuda_version_string
self.binary_name = binary_name
self.manual_override()
package_dir = Path(__file__).parent.parent
binary_path = package_dir / self.binary_name
+ suffix = ".so" if os.name != "nt" else ".dll"
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
+ legacy_binary_name = f"libbitsandbytes_cpu{suffix}"
- legacy_binary_name = "libbitsandbytes_cpu.so"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
'"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
self.add_log_entry('2. CUDA driver not installed')
self.add_log_entry</s>
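The below-chunk logic above assembles the binary name from the detected CUDA version and cuBLASLt support; a compact, self-contained sketch of that naming scheme (the version and capability values are assumptions for illustration):

import os

cuda_version_string = "121"                    # e.g. PyTorch built against CUDA 12.1
has_cublaslt = True                            # compute capability >= 7.5
suffix = ".so" if os.name != "nt" else ".dll"

binary_name = f"libbitsandbytes_cuda{cuda_version_string}"
if not has_cublaslt:
    binary_name += "_nocublaslt"
binary_name += suffix
print(binary_name)                             # libbitsandbytes_cuda121.so on Linux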
|
bitsandbytes.__main__/find_file_recursive
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
e651e8ed61b8fe8dcc3322c34ea3d53c6b323021
|
fix library detection, enable Windows (#873)
|
<0>:<add> import glob
<add> outs = []
<add> try:
<add> for ext in ["so", "dll", "dylib"]:
<add> out = glob.glob(os.path.join(folder, "**", filename + ext))
<add> outs.extend(out)
<add> except Exception as e:
<del> folder = shlex.quote(folder)
<1>:<del> filename = shlex.quote(filename)
<2>:<del> cmd = f'find {folder} -name {filename}'
<3>:<del> out, err = execute_and_return(cmd)
<4>:<del> if len(err) > 0:
<5>:<add> raise RuntimeError('Error: Something when wrong when trying to find file. {e}')
<del> raise RuntimeError('Something when wrong when trying to find file. Maybe you do not have a linux system?')
<7>:<add> return outs
<del> return out
|
# module: bitsandbytes.__main__
def find_file_recursive(folder, filename):
<0> folder = shlex.quote(folder)
<1> filename = shlex.quote(filename)
<2> cmd = f'find {folder} -name {filename}'
<3> out, err = execute_and_return(cmd)
<4> if len(err) > 0:
<5> raise RuntimeError('Something when wrong when trying to find file. Maybe you do not have a linux system?')
<6>
<7> return out
<8>
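For comparison, a small pure-Python sketch of the shell-free recursive search this commit moves toward (the helper name and pattern handling are illustrative, not the library's exact implementation):

import glob
import os

def find_files(folder: str, stem: str) -> list:
    matches = []
    for ext in (".so", ".dll", ".dylib"):
        # '**' only descends into subdirectories when recursive=True is passed
        matches.extend(glob.glob(os.path.join(folder, "**", stem + "*" + ext), recursive=True))
    return matches

print(find_files(os.getcwd(), "libcudart"))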
|
===========unchanged ref 0===========
at: bitsandbytes.__main__
find_file_recursive(folder, filename)
print_header(txt: str, width: int=HEADER_WIDTH, filler: str="+") -> None
at: os
environ = _createenviron()
at: os.path
isdir(s: AnyPath) -> bool
===========changed ref 0===========
# module: bitsandbytes.__main__
- def execute_and_return(command_string: str) -> Tuple[str, str]:
- def _decode(subprocess_err_out_tuple):
- return tuple(
- to_decode.decode("UTF-8").strip()
- for to_decode in subprocess_err_out_tuple
- )
-
- def execute_and_return_decoded_std_streams(command_string):
- return _decode(
- subprocess.Popen(
- shlex.split(command_string),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- ).communicate()
- )
-
- std_out, std_err = execute_and_return_decoded_std_streams(command_string)
- return std_out, std_err
-
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.env_vars
def might_contain_a_path(candidate: str) -> bool:
+ return os.sep in candidate
- return "/" in candidate
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
+ return {Path(ld_path) for ld_path in paths_list_candidate.split(os.pathsep) if ld_path}
- return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
===========changed ref 3===========
# module: setup
libs = list(glob.glob("./bitsandbytes/libbitsandbytes*.so"))
+ libs += list(glob.glob("./bitsandbytes/libbitsandbytes*.dll"))
libs = [os.path.basename(p) for p in libs]
print("libs:", libs)
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
# these are the most common libs names
# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
# we have libcudart.so.11.0 which causes a lot of errors before
# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ system = platform.system()
+ if system == 'Windows':
+ CUDA_RUNTIME_LIBS: list = ["nvcuda.dll"]
+ else: # Linux or other
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
- CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
# this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
backup_paths = []
backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def manual_override(self):
if torch.cuda.is_available():
if 'BNB_CUDA_VERSION' in os.environ:
if len(os.environ['BNB_CUDA_VERSION']) > 0:
warn((f'\n\n{"="*80}\n'
'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
f'\n{"="*80}\n\n'))
+ binary_name = self.binary_name.rsplit(".", 1)[0]
+ suffix = ".so" if os.name != "nt" else ".dll"
+ self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
- self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
===========changed ref 6===========
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
cuda_setup = CUDASetup.get_instance()
+ suffix = ".so" if os.name != "nt" else ".dll"
if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
cuda_setup.add_log_entry('')
cuda_setup.add_log_entry('='*35 + 'BUG REPORT' + '='*35)
cuda_setup.add_log_entry(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
cuda_setup.add_log_entry('='*80)
+ if not torch.cuda.is_available(): return f'libbitsandbytes_cpu{suffix}', None, None, None
- if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None
cudart_path = determine_cuda_runtime_lib_path()
ccs = get_compute_capabilities()
ccs.sort()
cc = ccs[-1] # we take the highest capability
cuda_version_string = get_cuda_version()
cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found: CUDA_VERSION={cuda_version_string}, Highest Compute Capability: {cc}.")
cuda_setup.add_log_entry(f"CUDA SETUP: To manually override the PyTorch CUDA version please see:"
"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md")
# 7.5 is the minimum CC vor cublaslt
has_cublaslt = is_cublasLt_compatible(cc)
# TODO:
</s>
|
bitsandbytes.__main__/generate_bug_report_information
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
e651e8ed61b8fe8dcc3322c34ea3d53c6b323021
|
fix library detection, enable Windows (#873)
|
<6>:<add> paths = find_file_recursive(os.environ['CONDA_PREFIX'], '*cuda*')
<del> paths = find_file_recursive(os.environ['CONDA_PREFIX'], '*cuda*so')
<11>:<add> paths = find_file_recursive('/usr/local', '*cuda*')
<del> paths = find_file_recursive('/usr/local', '*cuda*so')
<13>:<add> print(paths)
<add> print('')
<add> if 'CUDA_PATH' in os.environ and isdir(os.environ['CUDA_PATH']):
<add> paths = find_file_recursive(os.environ['CUDA_PATH'], '*cuda*')
<add> print_header("CUDA PATHS")
<17>:<add> paths = find_file_recursive(os.getcwd(), '*cuda*')
<del> paths = find_file_recursive(os.getcwd(), '*cuda*so')
<25>:<add> for path in set(lib_path.split(os.pathsep)):
<del> for path in set(lib_path.split(':')):
<29>:<add> paths = find_file_recursive(path, '*cuda*')
<del> paths = find_file_recursive(path, '*cuda*so')
|
# module: bitsandbytes.__main__
def generate_bug_report_information():
<0> print_header("")
<1> print_header("BUG REPORT INFORMATION")
<2> print_header("")
<3> print('')
<4>
<5> if 'CONDA_PREFIX' in os.environ:
<6> paths = find_file_recursive(os.environ['CONDA_PREFIX'], '*cuda*so')
<7> print_header("ANACONDA CUDA PATHS")
<8> print(paths)
<9> print('')
<10> if isdir('/usr/local/'):
<11> paths = find_file_recursive('/usr/local', '*cuda*so')
<12> print_header("/usr/local CUDA PATHS")
<13> print(paths)
<14> print('')
<15>
<16> if isdir(os.getcwd()):
<17> paths = find_file_recursive(os.getcwd(), '*cuda*so')
<18> print_header("WORKING DIRECTORY CUDA PATHS")
<19> print(paths)
<20> print('')
<21>
<22> print_header("LD_LIBRARY CUDA PATHS")
<23> if 'LD_LIBRARY_PATH' in os.environ:
<24> lib_path = os.environ['LD_LIBRARY_PATH'].strip()
<25> for path in set(lib_path.split(':')):
<26> try:
<27> if isdir(path):
<28> print_header(f"{path} CUDA PATHS")
<29> paths = find_file_recursive(path, '*cuda*so')
<30> print(paths)
<31> except:
<32> print(f'Could not read LD_LIBRARY_PATH: {path}')
<33> print('')
<34>
|
===========unchanged ref 0===========
at: bitsandbytes.__main__
HEADER_WIDTH = 60
find_file_recursive(folder, filename)
at: os
getcwd() -> str
environ = _createenviron()
at: os.path
isdir(s: AnyPath) -> bool
===========changed ref 0===========
# module: bitsandbytes.__main__
def find_file_recursive(folder, filename):
+ import glob
+ outs = []
+ try:
+ for ext in ["so", "dll", "dylib"]:
+ out = glob.glob(os.path.join(folder, "**", filename + ext))
+ outs.extend(out)
+ except Exception as e:
- folder = shlex.quote(folder)
- filename = shlex.quote(filename)
- cmd = f'find {folder} -name {filename}'
- out, err = execute_and_return(cmd)
- if len(err) > 0:
+ raise RuntimeError('Error: Something when wrong when trying to find file. {e}')
- raise RuntimeError('Something when wrong when trying to find file. Maybe you do not have a linux system?')
+ return outs
- return out
===========changed ref 1===========
# module: bitsandbytes.__main__
- def execute_and_return(command_string: str) -> Tuple[str, str]:
- def _decode(subprocess_err_out_tuple):
- return tuple(
- to_decode.decode("UTF-8").strip()
- for to_decode in subprocess_err_out_tuple
- )
-
- def execute_and_return_decoded_std_streams(command_string):
- return _decode(
- subprocess.Popen(
- shlex.split(command_string),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- ).communicate()
- )
-
- std_out, std_err = execute_and_return_decoded_std_streams(command_string)
- return std_out, std_err
-
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.env_vars
def might_contain_a_path(candidate: str) -> bool:
+ return os.sep in candidate
- return "/" in candidate
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
+ return {Path(ld_path) for ld_path in paths_list_candidate.split(os.pathsep) if ld_path}
- return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
===========changed ref 4===========
# module: setup
libs = list(glob.glob("./bitsandbytes/libbitsandbytes*.so"))
+ libs += list(glob.glob("./bitsandbytes/libbitsandbytes*.dll"))
libs = [os.path.basename(p) for p in libs]
print("libs:", libs)
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
# these are the most common libs names
# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
# we have libcudart.so.11.0 which causes a lot of errors before
# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ system = platform.system()
+ if system == 'Windows':
+ CUDA_RUNTIME_LIBS: list = ["nvcuda.dll"]
+ else: # Linux or other
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
- CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
# this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
backup_paths = []
backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
===========changed ref 6===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def manual_override(self):
if torch.cuda.is_available():
if 'BNB_CUDA_VERSION' in os.environ:
if len(os.environ['BNB_CUDA_VERSION']) > 0:
warn((f'\n\n{"="*80}\n'
'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
f'\n{"="*80}\n\n'))
+ binary_name = self.binary_name.rsplit(".", 1)[0]
+ suffix = ".so" if os.name != "nt" else ".dll"
+ self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
- self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
===========changed ref 7===========
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
cuda_setup = CUDASetup.get_instance()
+ suffix = ".so" if os.name != "nt" else ".dll"
if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
cuda_setup.add_log_entry('')
cuda_setup.add_log_entry('='*35 + 'BUG REPORT' + '='*35)
cuda_setup.add_log_entry(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
cuda_setup.add_log_entry('='*80)
+ if not torch.cuda.is_available(): return f'libbitsandbytes_cpu{suffix}', None, None, None
- if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None
cudart_path = determine_cuda_runtime_lib_path()
ccs = get_compute_capabilities()
ccs.sort()
cc = ccs[-1] # we take the highest capability
cuda_version_string = get_cuda_version()
cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found: CUDA_VERSION={cuda_version_string}, Highest Compute Capability: {cc}.")
cuda_setup.add_log_entry(f"CUDA SETUP: To manually override the PyTorch CUDA version please see:"
"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md")
# 7.5 is the minimum CC vor cublaslt
has_cublaslt = is_cublasLt_compatible(cc)
# TODO:
</s>
|
bitsandbytes.functional/create_normal_map
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
32be289735cdf059f7f89997d70be5ea6bd4ba30
|
Don't require scipy for regular use (#948)
|
<0>:<add> try:
<add> from scipy.stats import norm
<del> from scipy.stats import norm
<1>:<add> except ImportError as ie:
<add> raise ImportError(
<add> "Scipy is required for `create_normal_map`. "
<add> "Install `bitsandbytes` with the `[test]` extra."
<add> ) from ie
|
# module: bitsandbytes.functional
def create_normal_map(offset=0.9677083, use_extra_value=True):
<0> from scipy.stats import norm
<1>
<2> if use_extra_value:
<3> # one more positive value, this is an asymmetric type
<4> v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
<5> v2 = [0]*(256-15) ## we have 15 non-zero values in this data type
<6> v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
<7> else:
<8> v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
<9> v2 = [0]*(256-14) ## we have 14 non-zero values in this data type
<10> v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
<11>
<12> v = v1 + v2 + v3
<13>
<14> values = torch.Tensor(v)
<15> values = values.sort().values
<16> values /= values.max()
<17>
<18> assert values.numel() == 256
<19>
<20> return values
<21>
|
===========unchanged ref 0===========
at: torch._C._VariableFunctions
linspace(start: Number, end: Number, steps: Optional[_int]=None, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
linspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
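As a quick sanity check of the quantile construction in create_normal_map above (requires scipy; the printed value is approximate):

import torch
from scipy.stats import norm

probs = torch.linspace(0.9677083, 0.5, 9)[:-1]
values = torch.tensor(norm.ppf(probs))
print(values[0].item())   # roughly 1.85, the largest quantile before normalization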
|
bitsandbytes.nn.triton_based_modules/SwitchBackLinear.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
a8c9dfa6aefd63c1ccdb62b5f37a99dd90906e8c
|
Fix some issues found by Mypy (#995)
|
<2>:<add> if not is_triton_available():
<del> if not is_triton_available:
|
# module: bitsandbytes.nn.triton_based_modules
class SwitchBackLinear(nn.Linear):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
device=None,
dtype=None,
vector_wise_quantization: bool = False,
mem_efficient : bool = False,
):
<0> super().__init__(in_features, out_features, bias, device, dtype)
<1>
<2> if not is_triton_available:
<3> raise ImportError('''Could not import triton. Please install triton to use SwitchBackLinear.
<4> Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower''')
<5>
<6> # By default, we use the global quantization.
<7> self.vector_wise_quantization = vector_wise_quantization
<8> if self.vector_wise_quantization:
<9> self._fn = _switchback_vectorrize
<10> if mem_efficient:
<11> print('mem efficient is not supported for vector-wise quantization.')
<12> exit(1)
<13> else:
<14> if mem_efficient:
<15> self._fn = _switchback_global_mem_efficient
<16> else:
<17> self._fn = _switchback_global
<18>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.triton_based_modules
_switchback_global(*args, **kwargs)
_switchback_vectorrize(*args, **kwargs)
_switchback_global_mem_efficient(*args, **kwargs)
at: bitsandbytes.triton.triton_utils
is_triton_available()
at: torch.nn.modules.linear.Linear
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
__init__(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
__init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
- tensor = torch.Tensor
-
-
# The inverse transformation for the colTuring and colAmpere format were contributed by Alex Borzunov:
# https://github.com/bigscience-workshop/petals/blob/main/src/petals/utils/linear8bitlt_patch.py
"""
This class pools outlier dimensions across layers.
This is particularly important for small models where outlier features
are less systematic and occur with low frequency.
"""
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
# these are the most common libs names
# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
# we have libcudart.so.11.0 which causes a lot of errors before
# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
system = platform.system()
if system == 'Windows':
+ CUDA_RUNTIME_LIBS = ["nvcuda.dll"]
- CUDA_RUNTIME_LIBS: list = ["nvcuda.dll"]
else: # Linux or other
+ CUDA_RUNTIME_LIBS = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
- CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
# this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
backup_paths = []
backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
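The SwitchBackLinear fix above is a single pair of parentheses; a minimal illustration of why the call matters (the stub function is hypothetical):

def is_triton_available() -> bool:
    return False   # pretend triton is missing

if not is_triton_available:     # a function object is always truthy, so this branch never runs
    print("never printed")
if not is_triton_available():   # calling it yields the real answer
    print("triton missing")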
|
bitsandbytes.functional/QuantState.from_dict
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
a8c9dfa6aefd63c1ccdb62b5f37a99dd90906e8c
|
Fix some issues found by Mypy (#995)
|
<18>:<add> first_qs_key = qs_key[0]
<del> qs_key = qs_key[0]
<19>:<add> qs_dict.update(unpack_tensor_to_dict(qs_dict.pop(first_qs_key)))
<del> qs_dict.update(unpack_tensor_to_dict(qs_dict.pop(qs_key)))
|
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState':
<0> """
<1> unpacks components of state_dict into QuantState
<2> where necessary, convert into strings, torch.dtype, ints, etc.
<3>
<4> qs_dict: based on state_dict, with only relevant keys, striped of prefixes.
<5>
<6> item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.
<7> """
<8>
<9> # unpacking tensor with non-tensor components
<10> qs_key = [k for k, v in qs_dict.items() if "quant_state" in k and isinstance(v, torch.Tensor)]
<11> if not len(qs_key) and 'quant_type' not in qs_dict:
<12> raise ValueError("Expected packed or unpacked quant_state items, found neither")
<13> elif len(qs_key) != 1 or qs_key[0].split(".")[-1] not in cls.valid_qs_type_keys:
<14> raise ValueError(f"There should be exactly one `quant_state` item with ending from {cls.valid_qs_type_keys}.\nDetected {qs_key}.")
<15>
<16> # unpacking minor and non-tensor quant state items if necessary
<17> if len(qs_key) == 1:
<18> qs_key = qs_key[0]
<19> qs_dict.update(unpack_tensor_to_dict(qs_dict.pop(qs_key)))
<20>
<21> qs_dict = {k.split('.')[-1]: v for k, v in qs_dict.items()} # strip prefixes
<22> assert set(qs_dict.keys()).issubset(cls.valid_qs_keys)
<23>
<24> if 'nested_absmax' in qs_dict:
<25> offset = torch.tensor(float(qs_dict['nested_offset'])).to(device)
<26> state2 = cls(
<27> absmax=qs</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
class QuantState:
@classmethod
def from_dict(cls, qs_dict: Dict[str, Any], device: torch.device) -> 'QuantState':
# offset: 1
blocksize=qs_dict['nested_blocksize'],
code=qs_dict['nested_quant_map'].to(device),
dtype=getattr(torch, qs_dict['nested_dtype']),
)
else:
offset, state2 = None, None
quant_state = cls(
quant_type=qs_dict['quant_type'],
absmax=qs_dict['absmax'].to(device),
blocksize=qs_dict['blocksize'],
code=qs_dict['quant_map'].to(device),
dtype=getattr(torch, qs_dict['dtype']),
shape=torch.Size(qs_dict['shape']) if qs_dict['shape'] is not None else None,
offset=offset,
state2=state2,
)
return quant_state
===========unchanged ref 0===========
at: bitsandbytes.functional.QuantState
valid_quant_types = ('fp4', 'nf4')
valid_qs_type_keys = [f"bitsandbytes__{x}" for x in valid_quant_types]
valid_qs_keys = ['absmax', 'quant_map', 'nested_absmax', 'nested_quant_map', 'quant_state', 'quant_type',
'blocksize', 'dtype', 'shape', 'nested_blocksize', 'nested_dtype', 'nested_offset']
at: bitsandbytes.functional.QuantState.__init__
self.absmax = absmax
self.shape = shape
self.dtype = dtype
self.blocksize = blocksize
self.quant_type = quant_type
self.offset = offset
self.state2 = state2
self.nested = state2 is not None
at: bitsandbytes.functional.QuantState.to
self.absmax = self.absmax.to(device)
self.offset = self.offset.to(device)
at: bitsandbytes.utils
unpack_tensor_to_dict(tensor_data)
at: torch._C
device(device: Union[_device, _int, str])
device(type: str, index: _int)
at: torch._C._VariableFunctions
tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
===========changed ref 0===========
# module: bitsandbytes.autograd._functions
- tensor = torch.Tensor
-
-
# The inverse transformation for the colTuring and colAmpere format were contributed by Alex Borzunov:
# https://github.com/bigscience-workshop/petals/blob/main/src/petals/utils/linear8bitlt_patch.py
"""
This class pools outlier dimensions across layers.
This is particularly important for small models where outlier features
are less systematic and occur with low frequency.
"""
===========changed ref 1===========
# module: bitsandbytes.nn.triton_based_modules
class SwitchBackLinear(nn.Linear):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
device=None,
dtype=None,
vector_wise_quantization: bool = False,
mem_efficient : bool = False,
):
super().__init__(in_features, out_features, bias, device, dtype)
+ if not is_triton_available():
- if not is_triton_available:
raise ImportError('''Could not import triton. Please install triton to use SwitchBackLinear.
Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower''')
# By default, we use the global quantization.
self.vector_wise_quantization = vector_wise_quantization
if self.vector_wise_quantization:
self._fn = _switchback_vectorrize
if mem_efficient:
print('mem efficient is not supported for vector-wise quantization.')
exit(1)
else:
if mem_efficient:
self._fn = _switchback_global_mem_efficient
else:
self._fn = _switchback_global
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
# these are the most common libs names
# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
# we have libcudart.so.11.0 which causes a lot of errors before
# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
system = platform.system()
if system == 'Windows':
+ CUDA_RUNTIME_LIBS = ["nvcuda.dll"]
- CUDA_RUNTIME_LIBS: list = ["nvcuda.dll"]
else: # Linux or other
+ CUDA_RUNTIME_LIBS = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
- CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
# this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
backup_paths = []
backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
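from_dict relies on minor, non-tensor quant-state items having been packed into a tensor and unpacked again; a generic round-trip sketch of that idea (the json/uint8 encoding here is an assumption for illustration, not necessarily the bitsandbytes format):

import json
import torch

def pack(meta: dict) -> torch.Tensor:
    raw = json.dumps(meta).encode("utf-8")
    return torch.tensor(list(raw), dtype=torch.uint8)

def unpack(packed: torch.Tensor) -> dict:
    return json.loads(bytes(packed.tolist()).decode("utf-8"))

meta = {"quant_type": "nf4", "blocksize": 64, "dtype": "float16"}
assert unpack(pack(meta)) == meta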
|
bitsandbytes.cuda_setup.main/remove_non_existent_dirs
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
706ec24d2f5717cf484191d2f09011432640a8e6
|
Ruff fixes (#984)
|
<5>:<add> except PermissionError:
<del> except PermissionError as pex:
<15>:<add> CUDASetup.get_instance().add_log_entry(
<add> f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
<add> is_warning=False,
<add> )
<del> CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
<16>:<del> f"be non-existent: {non_existent_directories}", is_warning=False)
|
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
<0> existent_directories: Set[Path] = set()
<1> for path in candidate_paths:
<2> try:
<3> if path.exists():
<4> existent_directories.add(path)
<5> except PermissionError as pex:
<6> # Handle the PermissionError first as it is a subtype of OSError
<7> # https://docs.python.org/3/library/exceptions.html#exception-hierarchy
<8> pass
<9> except OSError as exc:
<10> if exc.errno != errno.ENAMETOOLONG:
<11> raise exc
<12>
<13> non_existent_directories: Set[Path] = candidate_paths - existent_directories
<14> if non_existent_directories:
<15> CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
<16> f"be non-existent: {non_existent_directories}", is_warning=False)
<17>
<18> return existent_directories
<19>
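A stripped-down sketch of the same existence-filtering idea, with placeholder directory names:

from pathlib import Path

candidates = {Path("/usr/local/cuda"), Path("/definitely/not/here")}
existing = set()
for p in candidates:
    try:
        if p.exists():
            existing.add(p)
    except OSError:
        # unreadable or overly long paths are skipped rather than fatal
        pass
print(candidates - existing)   # the non-existent ones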
| |
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
706ec24d2f5717cf484191d2f09011432640a8e6
|
Ruff fixes (#984)
|
<17>:<add> cuda_setup.add_log_entry(
<add> "CUDA SETUP: To manually override the PyTorch CUDA version please see:"
<del> cuda_setup.add_log_entry(f"CUDA SETUP: To manually override the PyTorch CUDA version please see:"
<18>:<add> "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md"
<del> "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md")
<19>:<add> )
|
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
<0> cuda_setup = CUDASetup.get_instance()
<1> suffix = ".so" if os.name != "nt" else ".dll"
<2> if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
<3> cuda_setup.add_log_entry('')
<4> cuda_setup.add_log_entry('='*35 + 'BUG REPORT' + '='*35)
<5> cuda_setup.add_log_entry(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
<6> ('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
<7> cuda_setup.add_log_entry('='*80)
<8> if not torch.cuda.is_available(): return f'libbitsandbytes_cpu{suffix}', None, None, None
<9>
<10> cudart_path = determine_cuda_runtime_lib_path()
<11> ccs = get_compute_capabilities()
<12> ccs.sort()
<13> cc = ccs[-1] # we take the highest capability
<14> cuda_version_string = get_cuda_version()
<15>
<16> cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found: CUDA_VERSION={cuda_version_string}, Highest Compute Capability: {cc}.")
<17> cuda_setup.add_log_entry(f"CUDA SETUP: To manually override the PyTorch CUDA version please see:"
<18> "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md")
<19>
<20>
<21> # 7.5 is the minimum CC for cublasLt
<22> has_cublaslt = is_cublasLt_compatible(cc)
<23>
<24> # TODO:
<25> # (1) CU</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
# offset: 1
# (2) Multiple CUDA versions installed
# we use ls -l instead of nvcc to determine the cuda version
# since most installations will have the libcudart.so installed, but not the compiler
if has_cublaslt:
binary_name = f"libbitsandbytes_cuda{cuda_version_string}"
else:
"if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt"
binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt"
binary_name = f"{binary_name}{suffix}"
return binary_name, cudart_path, cc, cuda_version_string
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError:
- except PermissionError as pex:
# Handle the PermissionError first as it is a subtype of OSError
# https://docs.python.org/3/library/exceptions.html#exception-hierarchy
pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry(
+ f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
+ is_warning=False,
+ )
- CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
|
bitsandbytes.nn.modules/Linear4bit.set_compute_type
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
706ec24d2f5717cf484191d2f09011432640a8e6
|
Ruff fixes (#984)
|
<9>:<add> warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
<del> warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
<12>:<add> warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
<del> warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
|
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def set_compute_type(self, x):
<0> if x.dtype in [torch.float32, torch.bfloat16]:
<1> # the input is in a dtype that is safe to compute in, we switch
<2> # to this type for speed and stability
<3> self.compute_dtype = x.dtype
<4> elif x.dtype == torch.float16:
<5> # we take the compute dtype passed into the layer
<6> if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
<7> # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
<8> # warn the user about this
<9> warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
<10> warnings.filterwarnings('ignore', message='.*inference.')
<11> if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
<12> warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
<13> warnings.filterwarnings('ignore', message='.*inference or training')
<14>
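
To avoid the fp32 slow path warned about above, the compute dtype can be matched to the fp16 activations when the layer is created; a hedged sketch, assuming the Linear4bit constructor's compute_dtype argument (present in recent bitsandbytes releases):

import torch
import bitsandbytes as bnb

# Sketch: a 4-bit layer whose matmul runs in fp16, so fp16 inputs take the fast path.
layer = bnb.nn.Linear4bit(4096, 4096, bias=False, compute_dtype=torch.float16).cuda()
x = torch.randn(1, 1, 4096, dtype=torch.float16, device="cuda")
with torch.no_grad():
    y = layer(x)  # no "bnb_4bit_compute_dtype=torch.float32" warning expected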
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError:
- except PermissionError as pex:
# Handle the PermissionError first as it is a subtype of OSError
# https://docs.python.org/3/library/exceptions.html#exception-hierarchy
pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry(
+ f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
+ is_warning=False,
+ )
- CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 1===========
# module: bitsandbytes.cextension
setup = CUDASetup.get_instance()
if setup.initialized != True:
setup.run_cuda_setup()
lib = setup.lib
try:
if lib is None and torch.cuda.is_available():
CUDASetup.get_instance().generate_instructions()
CUDASetup.get_instance().print_log_stack()
raise RuntimeError('''
CUDA Setup failed despite GPU being available. Please run the following command to get more information:
python -m bitsandbytes
Inspect the output of the command and see if you can locate CUDA libraries. You might need to add them
to your LD_LIBRARY_PATH. If you suspect a bug, please take the information from python -m bitsandbytes
and open an issue at: https://github.com/TimDettmers/bitsandbytes/issues''')
+ _ = lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
- lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
lib.get_context.restype = ct.c_void_p
lib.get_cusparse.restype = ct.c_void_p
lib.cget_managed_ptr.restype = ct.c_void_p
COMPILED_WITH_CUDA = True
except AttributeError as ex:
warn("The installed version of bitsandbytes was compiled without GPU support. "
"8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.")
COMPILED_WITH_CUDA = False
print(str(ex))
# print the setup details after checking for errors so we do not print twice
#if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
#setup.print_log_stack()
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
cuda_setup = CUDASetup.get_instance()
suffix = ".so" if os.name != "nt" else ".dll"
if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
cuda_setup.add_log_entry('')
cuda_setup.add_log_entry('='*35 + 'BUG REPORT' + '='*35)
cuda_setup.add_log_entry(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
cuda_setup.add_log_entry('='*80)
if not torch.cuda.is_available(): return f'libbitsandbytes_cpu{suffix}', None, None, None
cudart_path = determine_cuda_runtime_lib_path()
ccs = get_compute_capabilities()
ccs.sort()
cc = ccs[-1] # we take the highest capability
cuda_version_string = get_cuda_version()
cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found: CUDA_VERSION={cuda_version_string}, Highest Compute Capability: {cc}.")
+ cuda_setup.add_log_entry(
+ "CUDA SETUP: To manually override the PyTorch CUDA version please see:"
- cuda_setup.add_log_entry(f"CUDA SETUP: To manually override the PyTorch CUDA version please see:"
+ "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md"
- "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda</s>
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
# offset: 1
<s>github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md")
+ )
    # 7.5 is the minimum CC for cublaslt
has_cublaslt = is_cublasLt_compatible(cc)
# TODO:
# (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
# (2) Multiple CUDA versions installed
# we use ls -l instead of nvcc to determine the cuda version
# since most installations will have the libcudart.so installed, but not the compiler
if has_cublaslt:
binary_name = f"libbitsandbytes_cuda{cuda_version_string}"
else:
"if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt"
binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt"
binary_name = f"{binary_name}{suffix}"
return binary_name, cudart_path, cc, cuda_version_string
|
bitsandbytes.nn.modules/Int8Params.cuda
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
706ec24d2f5717cf484191d2f09011432640a8e6
|
Ruff fixes (#984)
|
<10>:<add> self.CB = CB
<add> self.SCB = SCB
<del> setattr(self, "CB", CB)
<11>:<del> setattr(self, "SCB", SCB)
|
# module: bitsandbytes.nn.modules
class Int8Params(torch.nn.Parameter):
def cuda(self, device):
<0> if self.has_fp16_weights:
<1> return super().cuda(device)
<2> else:
<3> # we store the 8-bit row-major weight
<4> # we convert this weight to the turing/ampere weight during the first inference pass
<5> B = self.data.contiguous().half().cuda(device)
<6> CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
<7> del CBt
<8> del SCBt
<9> self.data = CB
<10> setattr(self, "CB", CB)
<11> setattr(self, "SCB", SCB)
<12>
<13> return self
<14>
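
For intuition, CB is the int8 row-major weight and SCB its per-row absmax scale; below is a rough pure-PyTorch sketch of that row-wise quantization (the real double_quant additionally returns column-wise statistics and an outlier COO tensor):

import torch

def rowwise_int8_quant(W):
    # per-row absmax scaling into the int8 range [-127, 127]
    scb = W.abs().amax(dim=1).float().clamp(min=1e-8)                    # per-row scale, like SCB
    cb = torch.round(W.float() * (127.0 / scb[:, None])).to(torch.int8)  # like CB
    return cb, scb

W = torch.randn(8, 16, dtype=torch.float16)
CB_sketch, SCB_sketch = rowwise_int8_quant(W)
W_approx = CB_sketch.float() * SCB_sketch[:, None] / 127.0  # approximate dequantization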
|
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def set_compute_type(self, x):
if x.dtype in [torch.float32, torch.bfloat16]:
# the input is in a dtype that is safe to compute in, we switch
# to this type for speed and stability
self.compute_dtype = x.dtype
elif x.dtype == torch.float16:
            # we take the compute dtype passed into the layer
if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
# single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
# warn the user about this
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
warnings.filterwarnings('ignore', message='.*inference.')
if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
warnings.filterwarnings('ignore', message='.*inference or training')
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError:
- except PermissionError as pex:
# Handle the PermissionError first as it is a subtype of OSError
# https://docs.python.org/3/library/exceptions.html#exception-hierarchy
pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry(
+ f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
+ is_warning=False,
+ )
- CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 2===========
# module: bitsandbytes.cextension
setup = CUDASetup.get_instance()
if setup.initialized != True:
setup.run_cuda_setup()
lib = setup.lib
try:
if lib is None and torch.cuda.is_available():
CUDASetup.get_instance().generate_instructions()
CUDASetup.get_instance().print_log_stack()
raise RuntimeError('''
CUDA Setup failed despite GPU being available. Please run the following command to get more information:
python -m bitsandbytes
Inspect the output of the command and see if you can locate CUDA libraries. You might need to add them
to your LD_LIBRARY_PATH. If you suspect a bug, please take the information from python -m bitsandbytes
and open an issue at: https://github.com/TimDettmers/bitsandbytes/issues''')
+ _ = lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
- lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
lib.get_context.restype = ct.c_void_p
lib.get_cusparse.restype = ct.c_void_p
lib.cget_managed_ptr.restype = ct.c_void_p
COMPILED_WITH_CUDA = True
except AttributeError as ex:
warn("The installed version of bitsandbytes was compiled without GPU support. "
"8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.")
COMPILED_WITH_CUDA = False
print(str(ex))
# print the setup details after checking for errors so we do not print twice
#if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
#setup.print_log_stack()
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
cuda_setup = CUDASetup.get_instance()
suffix = ".so" if os.name != "nt" else ".dll"
if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
cuda_setup.add_log_entry('')
cuda_setup.add_log_entry('='*35 + 'BUG REPORT' + '='*35)
cuda_setup.add_log_entry(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
cuda_setup.add_log_entry('='*80)
if not torch.cuda.is_available(): return f'libbitsandbytes_cpu{suffix}', None, None, None
cudart_path = determine_cuda_runtime_lib_path()
ccs = get_compute_capabilities()
ccs.sort()
cc = ccs[-1] # we take the highest capability
cuda_version_string = get_cuda_version()
cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found: CUDA_VERSION={cuda_version_string}, Highest Compute Capability: {cc}.")
+ cuda_setup.add_log_entry(
+ "CUDA SETUP: To manually override the PyTorch CUDA version please see:"
- cuda_setup.add_log_entry(f"CUDA SETUP: To manually override the PyTorch CUDA version please see:"
+ "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md"
- "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda</s>
|
bitsandbytes.functional/get_4bit_type
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
706ec24d2f5717cf484191d2f09011432640a8e6
|
Ruff fixes (#984)
|
# module: bitsandbytes.functional
def get_4bit_type(typename, device=None, blocksize=64):
<0> if device is None: device = 'cuda'
<1> data = None
<2> if typename == 'nf4':
<3> ''' Implements the NF4 data type.
<4>
<5> Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that
<6> is normalized into the range [-1, 1].
<7>
<8> For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314)
<9>
<10> Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in
<11> the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.
<12> '''
<13> data = [-1.0, -0.6961928009986877, -0.5250730514526367, -0.39491748809814453, -0.28444138169288635,
<14> -0.18477343022823334, -0.09105003625154495, 0.0, 0.07958029955625534, 0.16093020141124725,
<15> 0.24611230194568634, 0.33791524171829224, 0.44070982933044434, 0.5626170039176941,
<16> 0.7229568362236023, 1.0]
<17> elif typename == 'fp4':
<18> # 0b000 = 0
<19> # 0b001 = 0.0625
<20> # 0b010 = 8
<21> # 0b011 = 12
<22> # 0b100 = 4
<23> # 0b101 = 6
<24> # 0b110 = 2
<25> # 0b111 = 3
<26> </s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def get_4bit_type(typename, device=None, blocksize=64):
# offset: 1
data = [0, 0.0625, 8.0, 12.0, 4.0, 6.0, 2.0, 3.0, -0, -0.0625, -8.0, -12.0, -4.0, -6.0, -2.0, -3.0]
elif typename == 'int4':
data = [7, 6, 5, 4, 3, 2, 1, 0, -0, -1, -2, -3, -4, -5, -6, -7]
elif typename == 'af4':
# Taken from: NF4 Isn't Information Theoretically Optimal (and that's Good)
# https://arxiv.org/abs/2306.06965
if blocksize == 64:
data = [-1., -0.69441008, -0.51243739, -0.3736951, -0.25607552, -0.14982478,
-0.04934812, 0., 0.04273164, 0.12934483, 0.21961274, 0.31675666,
0.42563882, 0.55496234, 0.72424863, 1.][::-1]
else:
raise NotImplementedError(f'4-bit AbnormalFloats currently only support blocksize 64.')
if data is None:
raise NotImplementedError(f'Typename {typename} not supported')
data = Tensor(data)
data /= data.abs().max()
assert data.numel() == 16
return data.to(device)
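
The NF4 table above can be reproduced, to good accuracy, from equal-probability-mass quantiles of N(0, 1); the sketch below follows the same idea as create_normal_map but is illustrative only (the offset constant and edge handling are quoted from memory and may differ slightly from the shipped implementation):

import numpy as np
from scipy.stats import norm

offset = 0.9677083                                   # probability mass covered by the outermost bins
pos = norm.ppf(np.linspace(offset, 0.5, 9)[:-1])     # 8 positive quantiles
neg = -norm.ppf(np.linspace(offset, 0.5, 8)[:-1])    # 7 negative quantiles
values = np.sort(np.concatenate([neg, [0.0], pos]))
values /= np.abs(values).max()                       # normalize into [-1, 1]
print(np.round(values, 4))                           # ~ matches the nf4 list above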
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Int8Params(torch.nn.Parameter):
def cuda(self, device):
if self.has_fp16_weights:
return super().cuda(device)
else:
            # we store the 8-bit row-major weight
            # we convert this weight to the turing/ampere weight during the first inference pass
B = self.data.contiguous().half().cuda(device)
CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
del CBt
del SCBt
self.data = CB
+ self.CB = CB
+ self.SCB = SCB
- setattr(self, "CB", CB)
- setattr(self, "SCB", SCB)
return self
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError:
- except PermissionError as pex:
# Handle the PermissionError first as it is a subtype of OSError
# https://docs.python.org/3/library/exceptions.html#exception-hierarchy
pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry(
+ f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
+ is_warning=False,
+ )
- CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def set_compute_type(self, x):
if x.dtype in [torch.float32, torch.bfloat16]:
# the input is in a dtype that is safe to compute in, we switch
# to this type for speed and stability
self.compute_dtype = x.dtype
elif x.dtype == torch.float16:
            # we take the compute dtype passed into the layer
if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
# single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
# warn the user about this
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
warnings.filterwarnings('ignore', message='.*inference.')
if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
warnings.filterwarnings('ignore', message='.*inference or training')
===========changed ref 3===========
# module: bitsandbytes.cextension
setup = CUDASetup.get_instance()
if setup.initialized != True:
setup.run_cuda_setup()
lib = setup.lib
try:
if lib is None and torch.cuda.is_available():
CUDASetup.get_instance().generate_instructions()
CUDASetup.get_instance().print_log_stack()
raise RuntimeError('''
CUDA Setup failed despite GPU being available. Please run the following command to get more information:
python -m bitsandbytes
Inspect the output of the command and see if you can locate CUDA libraries. You might need to add them
to your LD_LIBRARY_PATH. If you suspect a bug, please take the information from python -m bitsandbytes
and open an issue at: https://github.com/TimDettmers/bitsandbytes/issues''')
+ _ = lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
- lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
lib.get_context.restype = ct.c_void_p
lib.get_cusparse.restype = ct.c_void_p
lib.cget_managed_ptr.restype = ct.c_void_p
COMPILED_WITH_CUDA = True
except AttributeError as ex:
warn("The installed version of bitsandbytes was compiled without GPU support. "
"8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.")
COMPILED_WITH_CUDA = False
print(str(ex))
# print the setup details after checking for errors so we do not print twice
#if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
#setup.print_log_stack()
|
|
bitsandbytes.functional/gemv_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
706ec24d2f5717cf484191d2f09011432640a8e6
|
Ruff fixes (#984)
|
<3>:<add> raise ValueError('state cannot None. gem_4bit( ) requires the state from quantize_4bit( )')
<del> raise ValueError(f'state cannot None. gem_4bit( ) requires the state from quantize_4bit( )')
<6>:<add> raise ValueError('Dimensions of A are invalid. Must be a vector with the leading dimensions of "1", e.g. [1, 1, 2048]')
<del> raise ValueError(f'Dimensions of A are invalid. Must be a vector with the leading dimensions of "1", e.g. [1, 1, 2048]')
|
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Optional[torch.Tensor] = None,
transposed_A=False,
transposed_B=False,
state=None
):
<0> prev_device = pre_call(A.device)
<1> #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
<2> if state is None:
<3> raise ValueError(f'state cannot None. gem_4bit( ) requires the state from quantize_4bit( )')
<4>
<5> if A.numel() != A.shape[-1]:
<6> raise ValueError(f'Dimensions of A are invalid. Must be a vector with the leading dimensions of "1", e.g. [1, 1, 2048]')
<7>
<8> Bshape = state.shape
<9> bout = Bshape[0]
<10> absmax = state.absmax
<11> if state.nested:
<12> absmax = dequantize_blockwise(state.absmax, state.state2)
<13> absmax += state.offset
<14>
<15> if out is None:
<16> if len(A.shape) == 3:
<17> out = torch.empty(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
<18> else:
<19> out = torch.empty(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
<20>
<21> n = 1
<22> m = Bshape[0]
<23> k = Bshape[1]
<24> lda = Bshape[0]
<25> ldc = Bshape[0]
<26> ldb = (A.shape[-1]+1)//2
<27> is_on_gpu([B, A, out, absmax, state.code])
<28> m = ct.c_int32(m)
<29> n = ct.c_int32(n)
<30> k = ct.c_int32(k</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def gemv_4bit(
A: Tensor,
B: Tensor,
out: Optional[torch.Tensor] = None,
transposed_A=False,
transposed_B=False,
state=None
):
# offset: 1
lda = ct.c_int32(lda)
ldb = ct.c_int32(ldb)
ldc = ct.c_int32(ldc)
if B.dtype in [torch.uint8, torch.bfloat16, torch.float16, torch.float32]:
if A.dtype == torch.float16:
lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(state.blocksize))
elif A.dtype == torch.bfloat16:
lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(state.blocksize))
elif A.dtype == torch.float32:
lib.cgemm_4bit_inference_naive_fp32(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state.code), get_ptr(out), lda, ldb, ldc, ct.c_int32(state.blocksize))
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
else:
raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
post_call(prev_device)
return out
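
A hedged end-to-end sketch of how this kernel is reached in practice: quantize a weight with quantize_4bit, then multiply a single fp16 token vector against it (the transpose mirrors what bnb.matmul_4bit does internally; shapes and dtypes follow the checks above):

import torch
import bitsandbytes.functional as F

W = torch.randn(4096, 4096, dtype=torch.float16, device="cuda")
W4, quant_state = F.quantize_4bit(W, quant_type="nf4")            # packed weight + QuantState
x = torch.randn(1, 1, 4096, dtype=torch.float16, device="cuda")   # leading dims must be 1
y = F.gemv_4bit(x, W4.t(), state=quant_state)                     # -> shape (1, 1, 4096)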
===========changed ref 0===========
# module: bitsandbytes.nn.modules
class Int8Params(torch.nn.Parameter):
def cuda(self, device):
if self.has_fp16_weights:
return super().cuda(device)
else:
            # we store the 8-bit row-major weight
            # we convert this weight to the turing/ampere weight during the first inference pass
B = self.data.contiguous().half().cuda(device)
CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
del CBt
del SCBt
self.data = CB
+ self.CB = CB
+ self.SCB = SCB
- setattr(self, "CB", CB)
- setattr(self, "SCB", SCB)
return self
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError:
- except PermissionError as pex:
# Handle the PermissionError first as it is a subtype of OSError
# https://docs.python.org/3/library/exceptions.html#exception-hierarchy
pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry(
+ f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
+ is_warning=False,
+ )
- CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def set_compute_type(self, x):
if x.dtype in [torch.float32, torch.bfloat16]:
# the input is in a dtype that is safe to compute in, we switch
# to this type for speed and stability
self.compute_dtype = x.dtype
elif x.dtype == torch.float16:
            # we take the compute dtype passed into the layer
if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
# single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
# warn the user about this
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
warnings.filterwarnings('ignore', message='.*inference.')
if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
warnings.filterwarnings('ignore', message='.*inference or training')
===========changed ref 3===========
# module: bitsandbytes.cextension
setup = CUDASetup.get_instance()
if setup.initialized != True:
setup.run_cuda_setup()
lib = setup.lib
try:
if lib is None and torch.cuda.is_available():
CUDASetup.get_instance().generate_instructions()
CUDASetup.get_instance().print_log_stack()
raise RuntimeError('''
CUDA Setup failed despite GPU being available. Please run the following command to get more information:
python -m bitsandbytes
Inspect the output of the command and see if you can locate CUDA libraries. You might need to add them
to your LD_LIBRARY_PATH. If you suspect a bug, please take the information from python -m bitsandbytes
and open an issue at: https://github.com/TimDettmers/bitsandbytes/issues''')
+ _ = lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
- lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
lib.get_context.restype = ct.c_void_p
lib.get_cusparse.restype = ct.c_void_p
lib.cget_managed_ptr.restype = ct.c_void_p
COMPILED_WITH_CUDA = True
except AttributeError as ex:
warn("The installed version of bitsandbytes was compiled without GPU support. "
"8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.")
COMPILED_WITH_CUDA = False
print(str(ex))
# print the setup details after checking for errors so we do not print twice
#if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
#setup.print_log_stack()
|
tests.test_optim/get_temp_dir
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
706ec24d2f5717cf484191d2f09011432640a8e6
|
Ruff fixes (#984)
|
<0>:<add> path = f"/tmp/autoswap/{uuid.uuid4()}"
<del> path = f"/tmp/autoswap/{str(uuid.uuid4())}"
|
# module: tests.test_optim
def get_temp_dir():
<0> path = f"/tmp/autoswap/{str(uuid.uuid4())}"
<1> os.makedirs(path, exist_ok=True)
<2> return path
<3>
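
For reference, the standard library offers an equivalent, slightly more portable way to create a unique scratch directory; this sketch is not what the test suite uses:

import tempfile

def get_temp_dir_portable():
    # mkdtemp creates the directory atomically and returns its absolute path
    return tempfile.mkdtemp(prefix="autoswap-")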
|
===========unchanged ref 0===========
at: os
makedirs(name: AnyPath, mode: int=..., exist_ok: bool=...) -> None
at: tests.test_optim.get_temp_dir
path = f"/tmp/autoswap/{uuid.uuid4()}"
===========changed ref 0===========
# module: bitsandbytes.functional
+ FIRST_CUDA_DEVICE = torch.device('cuda', index=0)
+
C = 127.0
===========changed ref 1===========
# module: bitsandbytes.nn.modules
class Int8Params(torch.nn.Parameter):
def cuda(self, device):
if self.has_fp16_weights:
return super().cuda(device)
else:
            # we store the 8-bit row-major weight
            # we convert this weight to the turing/ampere weight during the first inference pass
B = self.data.contiguous().half().cuda(device)
CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
del CBt
del SCBt
self.data = CB
+ self.CB = CB
+ self.SCB = SCB
- setattr(self, "CB", CB)
- setattr(self, "SCB", SCB)
return self
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError:
- except PermissionError as pex:
# Handle the PermissionError first as it is a subtype of OSError
# https://docs.python.org/3/library/exceptions.html#exception-hierarchy
pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry(
+ f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
+ is_warning=False,
+ )
- CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def set_compute_type(self, x):
if x.dtype in [torch.float32, torch.bfloat16]:
# the input is in a dtype that is safe to compute in, we switch
# to this type for speed and stability
self.compute_dtype = x.dtype
elif x.dtype == torch.float16:
            # we take the compute dtype passed into the layer
if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
# single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
# warn the user about this
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
warnings.filterwarnings('ignore', message='.*inference.')
if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
warnings.filterwarnings('ignore', message='.*inference or training')
===========changed ref 4===========
# module: bitsandbytes.cextension
setup = CUDASetup.get_instance()
if setup.initialized != True:
setup.run_cuda_setup()
lib = setup.lib
try:
if lib is None and torch.cuda.is_available():
CUDASetup.get_instance().generate_instructions()
CUDASetup.get_instance().print_log_stack()
raise RuntimeError('''
CUDA Setup failed despite GPU being available. Please run the following command to get more information:
python -m bitsandbytes
Inspect the output of the command and see if you can locate CUDA libraries. You might need to add them
to your LD_LIBRARY_PATH. If you suspect a bug, please take the information from python -m bitsandbytes
and open an issue at: https://github.com/TimDettmers/bitsandbytes/issues''')
+ _ = lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
- lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
lib.get_context.restype = ct.c_void_p
lib.get_cusparse.restype = ct.c_void_p
lib.cget_managed_ptr.restype = ct.c_void_p
COMPILED_WITH_CUDA = True
except AttributeError as ex:
warn("The installed version of bitsandbytes was compiled without GPU support. "
"8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.")
COMPILED_WITH_CUDA = False
print(str(ex))
# print the setup details after checking for errors so we do not print twice
#if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
#setup.print_log_stack()
|
bitsandbytes.__main__/find_file_recursive
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
706ec24d2f5717cf484191d2f09011432640a8e6
|
Ruff fixes (#984)
|
<7>:<add> raise RuntimeError('Error: Something when wrong when trying to find file.') from e
<del> raise RuntimeError('Error: Something when wrong when trying to find file. {e}')
|
# module: bitsandbytes.__main__
def find_file_recursive(folder, filename):
<0> import glob
<1> outs = []
<2> try:
<3> for ext in ["so", "dll", "dylib"]:
<4> out = glob.glob(os.path.join(folder, "**", filename + ext))
<5> outs.extend(out)
<6> except Exception as e:
<7> raise RuntimeError('Error: Something when wrong when trying to find file. {e}')
<8>
<9> return outs
<10>
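
Worth noting: glob only treats '**' as a recursive wildcard when recursive=True is passed, so the loop above effectively searches a single directory level; a sketch of a fully recursive variant (the helper name is illustrative):

import glob
import os

def find_file_recursive_deep(folder, filename):
    # recursive=True lets '**' match any number of nested directories (including none)
    outs = []
    for ext in ("so", "dll", "dylib"):
        outs.extend(glob.glob(os.path.join(folder, "**", filename + ext), recursive=True))
    return outs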
|
===========unchanged ref 0===========
at: bitsandbytes.__main__
print_header(txt: str, width: int=HEADER_WIDTH, filler: str="+") -> None
at: bitsandbytes.__main__.find_file_recursive
outs = []
at: glob
glob(pathname: AnyStr, *, recursive: bool=...) -> List[AnyStr]
at: os.path
join(a: StrPath, *paths: StrPath) -> str
join(a: BytesPath, *paths: BytesPath) -> bytes
===========changed ref 0===========
# module: bitsandbytes.functional
+ FIRST_CUDA_DEVICE = torch.device('cuda', index=0)
+
C = 127.0
===========changed ref 1===========
# module: tests.test_optim
def get_temp_dir():
+ path = f"/tmp/autoswap/{uuid.uuid4()}"
- path = f"/tmp/autoswap/{str(uuid.uuid4())}"
os.makedirs(path, exist_ok=True)
return path
===========changed ref 2===========
# module: bitsandbytes.nn.modules
class Int8Params(torch.nn.Parameter):
def cuda(self, device):
if self.has_fp16_weights:
return super().cuda(device)
else:
            # we store the 8-bit row-major weight
            # we convert this weight to the turing/ampere weight during the first inference pass
B = self.data.contiguous().half().cuda(device)
CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
del CBt
del SCBt
self.data = CB
+ self.CB = CB
+ self.SCB = SCB
- setattr(self, "CB", CB)
- setattr(self, "SCB", SCB)
return self
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError:
- except PermissionError as pex:
# Handle the PermissionError first as it is a subtype of OSError
# https://docs.python.org/3/library/exceptions.html#exception-hierarchy
pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry(
+ f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
+ is_warning=False,
+ )
- CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def set_compute_type(self, x):
if x.dtype in [torch.float32, torch.bfloat16]:
# the input is in a dtype that is safe to compute in, we switch
# to this type for speed and stability
self.compute_dtype = x.dtype
elif x.dtype == torch.float16:
            # we take the compute dtype passed into the layer
if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
# single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
# warn the user about this
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
warnings.filterwarnings('ignore', message='.*inference.')
if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
warnings.filterwarnings('ignore', message='.*inference or training')
===========changed ref 5===========
# module: bitsandbytes.cextension
setup = CUDASetup.get_instance()
if setup.initialized != True:
setup.run_cuda_setup()
lib = setup.lib
try:
if lib is None and torch.cuda.is_available():
CUDASetup.get_instance().generate_instructions()
CUDASetup.get_instance().print_log_stack()
raise RuntimeError('''
CUDA Setup failed despite GPU being available. Please run the following command to get more information:
python -m bitsandbytes
Inspect the output of the command and see if you can locate CUDA libraries. You might need to add them
to your LD_LIBRARY_PATH. If you suspect a bug, please take the information from python -m bitsandbytes
and open an issue at: https://github.com/TimDettmers/bitsandbytes/issues''')
+ _ = lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
- lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
lib.get_context.restype = ct.c_void_p
lib.get_cusparse.restype = ct.c_void_p
lib.cget_managed_ptr.restype = ct.c_void_p
COMPILED_WITH_CUDA = True
except AttributeError as ex:
warn("The installed version of bitsandbytes was compiled without GPU support. "
"8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.")
COMPILED_WITH_CUDA = False
print(str(ex))
# print the setup details after checking for errors so we do not print twice
#if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
#setup.print_log_stack()
|
bitsandbytes.__main__/generate_bug_report_information
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
706ec24d2f5717cf484191d2f09011432640a8e6
|
Ruff fixes (#984)
|
<36>:<add> except Exception as e:
<del> except:
<37>:<add> print(f'Could not read LD_LIBRARY_PATH: {path} ({e})')
<del> print(f'Could not read LD_LIBRARY_PATH: {path}')
|
# module: bitsandbytes.__main__
def generate_bug_report_information():
<0> print_header("")
<1> print_header("BUG REPORT INFORMATION")
<2> print_header("")
<3> print('')
<4>
<5> if 'CONDA_PREFIX' in os.environ:
<6> paths = find_file_recursive(os.environ['CONDA_PREFIX'], '*cuda*')
<7> print_header("ANACONDA CUDA PATHS")
<8> print(paths)
<9> print('')
<10> if isdir('/usr/local/'):
<11> paths = find_file_recursive('/usr/local', '*cuda*')
<12> print_header("/usr/local CUDA PATHS")
<13> print(paths)
<14> print('')
<15> if 'CUDA_PATH' in os.environ and isdir(os.environ['CUDA_PATH']):
<16> paths = find_file_recursive(os.environ['CUDA_PATH'], '*cuda*')
<17> print_header("CUDA PATHS")
<18> print(paths)
<19> print('')
<20>
<21> if isdir(os.getcwd()):
<22> paths = find_file_recursive(os.getcwd(), '*cuda*')
<23> print_header("WORKING DIRECTORY CUDA PATHS")
<24> print(paths)
<25> print('')
<26>
<27> print_header("LD_LIBRARY CUDA PATHS")
<28> if 'LD_LIBRARY_PATH' in os.environ:
<29> lib_path = os.environ['LD_LIBRARY_PATH'].strip()
<30> for path in set(lib_path.split(os.pathsep)):
<31> try:
<32> if isdir(path):
<33> print_header(f"{path} CUDA PATHS")
<34> paths = find_file_recursive(path, '*cuda*')
<35> print(paths)
<36> except:
<37> print(f'Could not read LD_LIBRARY_PATH: {path}')
<38> print('')
<39>
|
===========unchanged ref 0===========
at: bitsandbytes.__main__
HEADER_WIDTH = 60
find_file_recursive(folder, filename)
at: os
getcwd() -> str
environ = _createenviron()
at: os.path
isdir(s: AnyPath) -> bool
===========changed ref 0===========
# module: bitsandbytes.__main__
def find_file_recursive(folder, filename):
import glob
outs = []
try:
for ext in ["so", "dll", "dylib"]:
out = glob.glob(os.path.join(folder, "**", filename + ext))
outs.extend(out)
except Exception as e:
+ raise RuntimeError('Error: Something when wrong when trying to find file.') from e
- raise RuntimeError('Error: Something when wrong when trying to find file. {e}')
return outs
===========changed ref 1===========
# module: bitsandbytes.functional
+ FIRST_CUDA_DEVICE = torch.device('cuda', index=0)
+
C = 127.0
===========changed ref 2===========
# module: tests.test_optim
def get_temp_dir():
+ path = f"/tmp/autoswap/{uuid.uuid4()}"
- path = f"/tmp/autoswap/{str(uuid.uuid4())}"
os.makedirs(path, exist_ok=True)
return path
===========changed ref 3===========
# module: bitsandbytes.nn.modules
class Int8Params(torch.nn.Parameter):
def cuda(self, device):
if self.has_fp16_weights:
return super().cuda(device)
else:
            # we store the 8-bit row-major weight
            # we convert this weight to the turing/ampere weight during the first inference pass
B = self.data.contiguous().half().cuda(device)
CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
del CBt
del SCBt
self.data = CB
+ self.CB = CB
+ self.SCB = SCB
- setattr(self, "CB", CB)
- setattr(self, "SCB", SCB)
return self
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError:
- except PermissionError as pex:
# Handle the PermissionError first as it is a subtype of OSError
# https://docs.python.org/3/library/exceptions.html#exception-hierarchy
pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry(
+ f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
+ is_warning=False,
+ )
- CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 5===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def set_compute_type(self, x):
if x.dtype in [torch.float32, torch.bfloat16]:
# the input is in a dtype that is safe to compute in, we switch
# to this type for speed and stability
self.compute_dtype = x.dtype
elif x.dtype == torch.float16:
            # we take the compute dtype passed into the layer
if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
# single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
# warn the user about this
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
warnings.filterwarnings('ignore', message='.*inference.')
if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
warnings.filterwarnings('ignore', message='.*inference or training')
===========changed ref 6===========
# module: bitsandbytes.cextension
setup = CUDASetup.get_instance()
if setup.initialized != True:
setup.run_cuda_setup()
lib = setup.lib
try:
if lib is None and torch.cuda.is_available():
CUDASetup.get_instance().generate_instructions()
CUDASetup.get_instance().print_log_stack()
raise RuntimeError('''
CUDA Setup failed despite GPU being available. Please run the following command to get more information:
python -m bitsandbytes
Inspect the output of the command and see if you can locate CUDA libraries. You might need to add them
to your LD_LIBRARY_PATH. If you suspect a bug, please take the information from python -m bitsandbytes
and open an issue at: https://github.com/TimDettmers/bitsandbytes/issues''')
+ _ = lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
- lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
lib.get_context.restype = ct.c_void_p
lib.get_cusparse.restype = ct.c_void_p
lib.cget_managed_ptr.restype = ct.c_void_p
COMPILED_WITH_CUDA = True
except AttributeError as ex:
warn("The installed version of bitsandbytes was compiled without GPU support. "
"8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.")
COMPILED_WITH_CUDA = False
print(str(ex))
# print the setup details after checking for errors so we do not print twice
#if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
#setup.print_log_stack()
|
bitsandbytes.__main__/print_debug_info
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
706ec24d2f5717cf484191d2f09011432640a8e6
|
Ruff fixes (#984)
|
<0>:<add> from . import PACKAGE_GITHUB_URL
|
# module: bitsandbytes.__main__
def print_debug_info() -> None:
<0> print(
<1> "\nAbove we output some debug information. Please provide this info when "
<2> f"creating an issue via {PACKAGE_GITHUB_URL}/issues/new/choose ...\n"
<3> )
<4>
|
===========unchanged ref 0===========
at: bitsandbytes.__main__
generate_bug_report_information()
at: bitsandbytes.cextension
COMPILED_WITH_CUDA = True
COMPILED_WITH_CUDA = False
===========changed ref 0===========
# module: bitsandbytes.__main__
def generate_bug_report_information():
print_header("")
print_header("BUG REPORT INFORMATION")
print_header("")
print('')
if 'CONDA_PREFIX' in os.environ:
paths = find_file_recursive(os.environ['CONDA_PREFIX'], '*cuda*')
print_header("ANACONDA CUDA PATHS")
print(paths)
print('')
if isdir('/usr/local/'):
paths = find_file_recursive('/usr/local', '*cuda*')
print_header("/usr/local CUDA PATHS")
print(paths)
print('')
if 'CUDA_PATH' in os.environ and isdir(os.environ['CUDA_PATH']):
paths = find_file_recursive(os.environ['CUDA_PATH'], '*cuda*')
print_header("CUDA PATHS")
print(paths)
print('')
if isdir(os.getcwd()):
paths = find_file_recursive(os.getcwd(), '*cuda*')
print_header("WORKING DIRECTORY CUDA PATHS")
print(paths)
print('')
print_header("LD_LIBRARY CUDA PATHS")
if 'LD_LIBRARY_PATH' in os.environ:
lib_path = os.environ['LD_LIBRARY_PATH'].strip()
for path in set(lib_path.split(os.pathsep)):
try:
if isdir(path):
print_header(f"{path} CUDA PATHS")
paths = find_file_recursive(path, '*cuda*')
print(paths)
+ except Exception as e:
- except:
+ print(f'Could not read LD_LIBRARY_PATH: {path} ({e})')
- print(f'Could not read LD_LIBRARY_PATH: {path}')
print('')
===========changed ref 1===========
# module: bitsandbytes.__main__
def find_file_recursive(folder, filename):
import glob
outs = []
try:
for ext in ["so", "dll", "dylib"]:
out = glob.glob(os.path.join(folder, "**", filename + ext))
outs.extend(out)
except Exception as e:
+ raise RuntimeError('Error: Something when wrong when trying to find file.') from e
- raise RuntimeError('Error: Something when wrong when trying to find file. {e}')
return outs
===========changed ref 2===========
# module: bitsandbytes.functional
+ FIRST_CUDA_DEVICE = torch.device('cuda', index=0)
+
C = 127.0
===========changed ref 3===========
# module: tests.test_optim
def get_temp_dir():
+ path = f"/tmp/autoswap/{uuid.uuid4()}"
- path = f"/tmp/autoswap/{str(uuid.uuid4())}"
os.makedirs(path, exist_ok=True)
return path
===========changed ref 4===========
# module: bitsandbytes.nn.modules
class Int8Params(torch.nn.Parameter):
def cuda(self, device):
if self.has_fp16_weights:
return super().cuda(device)
else:
            # we store the 8-bit row-major weight
            # we convert this weight to the turing/ampere weight during the first inference pass
B = self.data.contiguous().half().cuda(device)
CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
del CBt
del SCBt
self.data = CB
+ self.CB = CB
+ self.SCB = SCB
- setattr(self, "CB", CB)
- setattr(self, "SCB", SCB)
return self
===========changed ref 5===========
# module: bitsandbytes.cuda_setup.main
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
existent_directories: Set[Path] = set()
for path in candidate_paths:
try:
if path.exists():
existent_directories.add(path)
+ except PermissionError:
- except PermissionError as pex:
# Handle the PermissionError first as it is a subtype of OSError
# https://docs.python.org/3/library/exceptions.html#exception-hierarchy
pass
except OSError as exc:
if exc.errno != errno.ENAMETOOLONG:
raise exc
non_existent_directories: Set[Path] = candidate_paths - existent_directories
if non_existent_directories:
+ CUDASetup.get_instance().add_log_entry(
+ f"The following directories listed in your path were found to be non-existent: {non_existent_directories}",
+ is_warning=False,
+ )
- CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
- f"be non-existent: {non_existent_directories}", is_warning=False)
return existent_directories
===========changed ref 6===========
# module: bitsandbytes.nn.modules
class Linear4bit(nn.Linear):
def set_compute_type(self, x):
if x.dtype in [torch.float32, torch.bfloat16]:
# the input is in a dtype that is safe to compute in, we switch
# to this type for speed and stability
self.compute_dtype = x.dtype
elif x.dtype == torch.float16:
            # we take the compute dtype passed into the layer
if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
# single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
# warn the user about this
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.')
warnings.filterwarnings('ignore', message='.*inference.')
if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
+ warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
- warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')
warnings.filterwarnings('ignore', message='.*inference or training')
|
bitsandbytes.functional/igemmlt
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
29a637bce75341dae1b6a171eabb2fe80d75ef57
|
Don't crash Python interpreter via assert(false) (#998)
|
# module: bitsandbytes.functional
def igemmlt(A, B, SA, SB, out=None, Sout=None, dtype=torch.int32):
<0> shapeA = SA[0]
<1> shapeB = SB[0]
<2> dimsA = len(shapeA)
<3> dimsB = len(shapeB)
<4> assert dimsB == 2, 'Only two dimensional matrices are supported for argument B'
<5> if dimsA == 2:
<6> m = shapeA[0]
<7> elif dimsA == 3:
<8> m = shapeA[0] * shapeA[1]
<9>
<10> rows = n = shapeB[0]
<11> assert prod(list(shapeA)) > 0, f'Input tensor dimensions need to be > 0: {shapeA}'
<12>
<13> # if the tensor is empty, return a transformed empty tensor with the right dimensions
<14> if shapeA[0] == 0 and dimsA == 2:
<15> return torch.empty((0, shapeB[0]), device=A.device, dtype=torch.float16)
<16> elif shapeA[1] == 0 and dimsA == 3:
<17> return torch.empty(tuple(shapeA[:2] + [shapeB[0]]), device=A.device, dtype=torch.float16)
<18>
<19> if dimsA == 2 and out is None:
<20> out, Sout = get_transform_buffer(
<21> (shapeA[0], shapeB[0]), dtype, A.device, "col32", "row"
<22> )
<23> elif dimsA == 3 and out is None:
<24> out, Sout = get_transform_buffer(
<25> (shapeA[0], shapeA[1], shapeB[0]), dtype, A.device, "col32", "row"
<26> )
<27>
<28> assert dimsB != 3, "len(B.shape)==3 not supported"
<29> assert A.device.type == "cuda"
<30> assert B.device.type == "cuda"
<31> assert A.dtype == torch.int8
<32> assert B.dtype == torch</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def igemmlt(A, B, SA, SB, out=None, Sout=None, dtype=torch.int32):
# offset: 1
assert out.dtype == dtype
assert SA[1] == "col32"
assert SB[1] in ["col_turing", "col_ampere"]
assert Sout[1] == "col32"
assert (
shapeA[-1] == shapeB[-1]
), f"Matmullt only supports A @ B^T. Inner matrix dimensions do not match: A @ B = {shapeA} @ {shapeB}"
formatB = SB[1]
prev_device = A.device
torch.cuda.set_device(A.device)
ptr = CUBLAS_Context.get_instance().get_context(A.device)
ptrA = get_ptr(A)
ptrB = get_ptr(B)
ptrC = get_ptr(out)
k = shapeA[-1]
lda = ct.c_int32(m * 32)
if formatB == "col_turing":
# turing: tiles with rows filled up to multiple of 8 rows by 32 columns
# n = rows
ldb = ct.c_int32(((rows + 7) // 8) * 8 * 32)
else:
# ampere: tiles with rows filled up to multiple of 32 rows by 32 columns
# n = rows
ldb = ct.c_int32(((rows + 31) // 32) * 32 * 32)
ldc = ct.c_int32(m * 32)
m = ct.c_int32(m)
n = ct.c_int32(n)
k = ct.c_int32(k)
has_error = 0
ptrRowScale = get_ptr(None)
is_on_gpu([A, B, out])
if formatB == 'col_turing':
if dtype == torch.int32:
has_error = lib.</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def igemmlt(A, B, SA, SB, out=None, Sout=None, dtype=torch.int32):
# offset: 2
<s>
if formatB == 'col_turing':
if dtype == torch.int32:
has_error = lib.cigemmlt_turing_32(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
else:
has_error = lib.cigemmlt_turing_8(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
elif formatB == "col_ampere":
if dtype == torch.int32:
has_error = lib.cigemmlt_ampere_32(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
else:
has_error = lib.cigemmlt_ampere_8(
ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
)
if has_error == 1:
print(f'A: {shapeA}, B: {shapeB}, C: {Sout[0]}; (lda, ldb, ldc): {(lda, ldb, ldc)}; (m, n, k): {(m, n, k)}')
raise Exception('cublasLt ran into an error!')
torch.cuda.set_device(prev_device)
return out, Sout
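
The leading-dimension arithmetic above just pads the B row count up to the tile height of the chosen layout (8 rows for col_turing, 32 rows for col_ampere, both 32 columns wide); a small illustrative helper, not library code:

def ldb_for_format(rows, format_b):
    # round rows up to the tile height, then multiply by the 32-column tile width
    if format_b == "col_turing":
        return ((rows + 7) // 8) * 8 * 32
    if format_b == "col_ampere":
        return ((rows + 31) // 32) * 32 * 32
    raise ValueError(f"unknown format: {format_b}")

assert ldb_for_format(100, "col_turing") == 104 * 32   # 100 -> padded to 104 rows
assert ldb_for_format(100, "col_ampere") == 128 * 32   # 100 -> padded to 128 rows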
|
|
tests.test_autograd/test_matmullt
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
89876bb0e29e45c2823e33e2fceac623a00b4fd3
|
Merge pull request #876 from wkpark/minimal-win-fix
|
<7>:<add> for i in range(3):
<del> for i in range(k):
|
<s>etrize(
- "dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
- values,
- ids=names,
- )
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
<0> dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
<1> dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
<2> outlier_dim = torch.randint(0, dimA[1], size=(dimA[1] // 8,), device="cuda")
<3> if has_bias == False:
<4> req_grad = list(req_grad)
<5> req_grad[2] = False
<6>
<7> for i in range(k):
<8>
<9> # normal multiply
<10> if funcs[0] in [torch.mm, torch.matmul]:
<11> A = torch.randn(
<12> size=dimA, device="cuda", requires_grad=req_grad[0], dtype=dtype
<13> )
<14> if decomp == 6.0:
<15> with torch.no_grad():
<16> A[:, outlier_dim] = 6.0
<17> B = torch.randn(
<18> size=dimB, device="cuda", requires_grad=req_grad[1], dtype=dtype
<19> )
<20> target = torch.randn(
<21> size=(dim2, dim4),
<22> device="cuda",
<23> requires_grad=req_grad[1],
<24> dtype=dtype,
<25> )
<26> bias = None
<27> bias2 = None
<28> if has_bias:
<29> bias = torch.randn(dim4, device='cuda', dtype=dtype, requires_grad=req_grad[</s>
|
===========below chunk 0===========
<s> "dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
- values,
- ids=names,
- )
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
# offset: 1
bias2 = bias.clone()
torch.nn.init.xavier_uniform_(B)
B2 = B.clone()
state = bnb.MatmulLtState()
state.threshold = decomp
state.has_fp16_weights = has_fp16_weights
if not has_fp16_weights:
if not transpose[0] and not transpose[1]:
B2 = B2.t().contiguous()
(
state.CB,
CBt,
state.SCB,
SCBt,
coo_tensorB,
) = bnb.functional.double_quant(B2.to(torch.float16))
B2 = state.CB
if not transpose[0] and transpose[1]:
out_torch = funcs[0](A, B.t())
out_bnb = funcs[1](A, B2, state=state, bias=bias2)
elif not transpose[0] and not transpose[1]:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B2.t(), state=state, bias=bias2)
if has_bias:
out_torch += bias
assert out_bnb.dtype == A.dtype, f"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}"
n = out_bnb.numel()
err = torch.abs(out_bnb - out_torch).mean().item()
# print</s>
===========below chunk 1===========
<s> "dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
- values,
- ids=names,
- )
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
# offset: 2
<s>.numel()
err = torch.abs(out_bnb - out_torch).mean().item()
# print(f'abs error {err:.4f}')
idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)
assert (idx == 0).sum().item() <= n * (0.0175 if dtype == torch.float16 else 0.021)
idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)
assert (idx == 0).sum().item() <= n * 0.001
if has_fp16_weights:
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(
out_bnb, target
).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias1 = bias.grad
bias.grad = None
loss_torch = torch.nn.functional.mse_loss(
out_torch, target
).mean()
loss_torch.backward()
grad</s>
===========below chunk 2===========
<s> "dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias",
- values,
- ids=names,
- )
def test_matmullt(
dim1,
dim2,
dim3,
dim4,
funcs,
dtype,
req_grad,
transpose,
decomp,
has_fp16_weights,
has_bias
):
# offset: 3
<s> = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias2 = bias.grad
bias.grad = None
if req_grad[0]:
torch.testing.assert_close(
gradA1, gradA2, atol=0.015, rtol=0.1
)
if req_grad[1]:
n = gradB1.numel()
if dim2 > 0:
assert torch.abs(gradB1).sum() > 0.0
assert torch.abs(gradB2).sum() > 0.0
else:
assert torch.abs(gradB1).sum() == 0.0
assert torch.abs(gradB2).sum() == 0.0
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.02
torch.testing.assert_close(
gradB1, gradB2, atol=0.18, rtol=0.3
)
if req_grad[2]:
torch.testing.assert_close(
===========changed ref 0===========
# module: tests.test_autograd
- n = 1
- k = 3
- dim1 = torch.randint(16, 64, size=(n,)).tolist()
- dim2 = torch.randint(32, 96, size=(n,)).tolist()
- dim3 = torch.randint(32, 96, size=(n,)).tolist()
- dim4 = torch.randint(32, 96, size=(n,)).tolist()
-
- dim2.append(0)
-
- decomp = [0.0, 6.0]
- funcs = [(torch.matmul, bnb.matmul), (torch.matmul, bnb.research.switchback_bnb)]
- str_funcs = ["matmullt", 'switchback_bnb']
- req_grad = [(False, False), (True, False), (True, True), (False, True)]
- req_grad = list(product([True, False], repeat=3))
- req_grad_str = []
- for c in req_grad:
- strval = ''
- for v in c:
- if v == True: strval += 'T'
- else: strval += 'F'
- req_grad_str.append(strval)
-
- transpose = [(False, True), (False, False)]
- str_transpose = ["NT", "NN"]
- dtype = [torch.float16, torch.bfloat16, torch.float32]
- has_fp16_weights = [True, False]
- has_bias = [True, False]
- values = list(
- product(
- dim1,
- dim2,
- dim3,
- dim4,
- funcs,
- dtype,
- req_grad,
- transpose,
- decomp,
- has_fp16_weights,
- has_bias
- )
- )
- str_values = list(
- product(
- dim1,
- dim2,
- dim3,
- dim4,
</s>
|
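The record above drops the module-level `values`/`names` product lists and the shared `k` loop counter in favour of a fixed `range(3)` and inline parametrization. Below is a minimal sketch of the decorator style this refactor moves toward, assuming the `id_formatter`/`describe_dtype` helpers from tests.helpers (shown later in this document); the exact decorator stack used upstream may differ.

import pytest
import torch

from tests.helpers import TRUE_FALSE, describe_dtype, id_formatter

@pytest.mark.parametrize("has_bias", TRUE_FALSE, ids=id_formatter("has_bias"))
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32], ids=describe_dtype)
@pytest.mark.parametrize("dim2", [48, 0], ids=id_formatter("dim2"))
def test_example(dim2, dtype, has_bias):
    # pytest expands the stacked decorators into the same cartesian product the old
    # `values = list(product(...))` produced, with readable ids such as
    # "dim2=48-dtype=fp16-has_bias=T" instead of hand-built `names` strings.
    assert dim2 >= 0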
tests.test_autograd/test_matmul_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
89876bb0e29e45c2823e33e2fceac623a00b4fd3
|
Merge pull request #876 from wkpark/minimal-win-fix
|
<6>:<add> for i in range(3):
<del> for i in range(k):
|
<s>2, dim3, dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type", values, ids=names)
+ def test_matmul_4bit(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type):
- def test_matmul_4bit( dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type):
<0> dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
<1> dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
<2> if has_bias == False:
<3> req_grad = list(req_grad)
<4> req_grad[2] = False
<5>
<6> for i in range(k):
<7> # normal multiply
<8> if funcs[0] in [torch.mm, torch.matmul]:
<9> A = torch.randn(size=dimA, device="cuda", requires_grad=req_grad[0], dtype=dtype)
<10> B = torch.randn(size=dimB, device="cuda", requires_grad=req_grad[1], dtype=dtype)
<11> target = torch.randn(size=(dim2, dim4), device="cuda", requires_grad=req_grad[1], dtype=dtype)
<12> bias = None
<13> bias2 = None
<14> if has_bias:
<15> bias = torch.randn(dim4, device='cuda', dtype=dtype, requires_grad=req_grad[2])
<16> bias2 = bias.clone()
<17> torch.nn.init.xavier_uniform_(B)
<18>
<19> B2, quant_state = bnb.functional.quantize_4bit(B, compress_statistics=compress_statistics, quant_type=quant_type)
<20>
<21> if not transpose[0] and transpose[1]:
<22> out_torch = funcs[0](A,</s>
|
===========below chunk 0===========
<s> dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type", values, ids=names)
+ def test_matmul_4bit(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type):
- def test_matmul_4bit( dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type):
# offset: 1
out_bnb = funcs[1](A, B2.t(), quant_state, bias=bias2)
elif not transpose[0] and not transpose[1]:
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B2, quant_state, bias=bias2)
if has_bias:
out_torch += bias
assert out_bnb.dtype == A.dtype, f"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}"
n = out_bnb.numel()
err = torch.abs(out_bnb - out_torch).float().mean().item()
if n > 0:
assert err < 0.115
#assert err < 0.20
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias1 = bias.grad
bias.grad = None
loss_torch = torch.nn.functional.mse_loss( out_torch, target ).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad</s>
===========below chunk 1===========
<s> dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type", values, ids=names)
+ def test_matmul_4bit(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type):
- def test_matmul_4bit( dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type):
# offset: 2
<s>.mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if has_bias:
gradBias2 = bias.grad
bias.grad = None
if req_grad[0]:
torch.testing.assert_close( gradA1, gradA2, atol=0.015, rtol=0.1)
if req_grad[2]:
torch.testing.assert_close(gradBias1, gradBias2)
===========changed ref 0===========
# module: tests.test_autograd
- n = 1
- k = 3
- dim1 = torch.randint(16, 64, size=(n,)).tolist()
- dim2 = torch.randint(32, 96, size=(n,)).tolist()
- dim3 = torch.randint(32, 96, size=(n,)).tolist()
- dim4 = torch.randint(32, 96, size=(n,)).tolist()
-
- dim2.append(0)
-
- funcs = [(torch.matmul, bnb.matmul_4bit)]
- str_funcs = ["matmul"]
- req_grad = list(product([True, False], repeat=3))
- req_grad_str = []
- for c in req_grad:
- strval = ''
- for v in c:
- if v == True: strval += 'T'
- else: strval += 'F'
- req_grad_str.append(strval)
-
- transpose = [(False, True), (False, False)]
- str_transpose = ["NT", "NN"]
- dtype = [torch.float16, torch.float32]
- compress_statistics = [False, True]
- has_fp16_weights = [True, False]
- has_bias = [True, False]
- quant_type = ['fp4', 'nf4']
- values = list(product(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type))
- str_values = list(product(dim1, dim2, dim3, dim4, str_funcs, dtype, req_grad_str, str_transpose, has_bias, compress_statistics, quant_type))
- names = ["dim1_{}_dim2_{}_dim3_{}_dim4_{}_func_{}_dtype_{}_requires_grad_{}_transpose_{}_has_bias_{}_compress_statistics_{}_quant_type_{}".format(*vals) for vals in str_values]
-
===========changed ref 1===========
# module: tests.test_autograd
- n = 1
- k = 3
- dim1 = torch.randint(16, 64, size=(n,)).tolist()
- dim2 = torch.randint(32, 96, size=(n,)).tolist()
- dim3 = torch.randint(32, 96, size=(n,)).tolist()
- dim4 = torch.randint(32, 96, size=(n,)).tolist()
-
- dim2.append(0)
-
- decomp = [0.0, 6.0]
- funcs = [(torch.matmul, bnb.matmul), (torch.matmul, bnb.research.switchback_bnb)]
- str_funcs = ["matmullt", 'switchback_bnb']
- req_grad = [(False, False), (True, False), (True, True), (False, True)]
- req_grad = list(product([True, False], repeat=3))
- req_grad_str = []
- for c in req_grad:
- strval = ''
- for v in c:
- if v == True: strval += 'T'
- else: strval += 'F'
- req_grad_str.append(strval)
-
- transpose = [(False, True), (False, False)]
- str_transpose = ["NT", "NN"]
- dtype = [torch.float16, torch.bfloat16, torch.float32]
- has_fp16_weights = [True, False]
- has_bias = [True, False]
- values = list(
- product(
- dim1,
- dim2,
- dim3,
- dim4,
- funcs,
- dtype,
- req_grad,
- transpose,
- decomp,
- has_fp16_weights,
- has_bias
- )
- )
- str_values = list(
- product(
- dim1,
- dim2,
- dim3,
- dim4,
</s>
===========changed ref 2===========
# module: tests.test_autograd
# offset: 1
<s>
- product(
- dim1,
- dim2,
- dim3,
- dim4,
- str_funcs,
- dtype,
- req_grad_str,
- str_transpose,
- decomp,
- has_fp16_weights,
- has_bias
- )
- )
- names = ["dim1_{}_dim2_{}_dim3_{}_dim4_{}_func_{}_dtype_{}_requires_grad_{}_transpose_{}_decomp_{}_has_fp16_weights_{}_has_bias_{}".format(*vals) for vals in str_values]
-
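A hedged sketch of the forward comparison test_matmul_4bit performs: quantize B to 4-bit, multiply through bnb.matmul_4bit, and compare against the fp16 reference. Shapes and the 0.115 tolerance mirror the test above and are illustrative, not normative.

import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

A = torch.randn(32, 64, device="cuda", dtype=torch.float16)
B = torch.randn(48, 64, device="cuda", dtype=torch.float16)
torch.nn.init.xavier_uniform_(B)  # keeps weight magnitudes small, as in the test

qB, quant_state = F.quantize_4bit(B, compress_statistics=False, quant_type="nf4")
out_ref = torch.matmul(A, B.t())
out_4bit = bnb.matmul_4bit(A, qB.t(), quant_state)

assert out_4bit.dtype == A.dtype
assert (out_4bit - out_ref).abs().float().mean().item() < 0.115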
|
tests.test_autograd/test_matmul_fp8
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
89876bb0e29e45c2823e33e2fceac623a00b4fd3
|
Merge pull request #876 from wkpark/minimal-win-fix
|
<5>:<add> for i in range(3):
<del> for i in range(k):
|
<s>research.matmul_fp8_mixed), (torch.matmul, bnb.research.matmul_fp8_global)], ids=["matmul_fp8_mixed", 'matmul_fp8_global'])
- @pytest.mark.parametrize( "dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose", values, ids=names)
def test_matmul_fp8( dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):
<0> dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)
<1> dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)
<2> req_grad = list(req_grad)
<3> req_grad[2] = False
<4>
<5> for i in range(k):
<6> # normal multiply
<7> if funcs[0] in [torch.mm, torch.matmul]:
<8> A = torch.randn(size=dimA, device="cuda", requires_grad=req_grad[0], dtype=dtype)
<9> B = torch.randn(size=dimB, device="cuda", requires_grad=req_grad[1], dtype=dtype)
<10> target = torch.randn(size=(dim2, dim4), device="cuda", requires_grad=req_grad[1], dtype=dtype)
<11>
<12> torch.nn.init.xavier_uniform_(B)
<13>
<14> fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(A.device)
<15> bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(A.device)
<16>
<17> if not transpose[0] and transpose[1]:
<18> out_torch = funcs[0](A, B.t())
<19> out_bnb = funcs[1](A, B.t(), fw_code, bw_code)
<20> elif not transpose[0] and not transpose[</s>
|
===========below chunk 0===========
<s>_fp8_mixed), (torch.matmul, bnb.research.matmul_fp8_global)], ids=["matmul_fp8_mixed", 'matmul_fp8_global'])
- @pytest.mark.parametrize( "dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose", values, ids=names)
def test_matmul_fp8( dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):
# offset: 1
out_torch = funcs[0](A, B)
out_bnb = funcs[1](A, B, fw_code, bw_code)
assert out_bnb.dtype == A.dtype, f"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}"
n = out_bnb.numel()
err = torch.abs(out_bnb - out_torch).float().mean().item()
if n > 0:
assert err < 0.115
#assert err < 0.20
if any(req_grad):
out_bnb.data.copy_(out_torch)
torch.cuda.synchronize()
loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()
loss_bnb.backward()
gradA1 = A.grad
gradB1 = B.grad
A.grad = None
B.grad = None
loss_torch = torch.nn.functional.mse_loss( out_torch, target ).mean()
loss_torch.backward()
gradA2 = A.grad
gradB2 = B.grad
A.grad = None
B.grad = None
if req_grad[0]:
torch.testing.assert_close( gradA1, gradA2, atol=0.015, rtol=0.1)
if req_grad[1]:
n = gradB1.numel()
if dim2</s>
===========below chunk 1===========
<s>_fp8_mixed), (torch.matmul, bnb.research.matmul_fp8_global)], ids=["matmul_fp8_mixed", 'matmul_fp8_global'])
- @pytest.mark.parametrize( "dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose", values, ids=names)
def test_matmul_fp8( dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):
# offset: 2
<s>0.1)
if req_grad[1]:
n = gradB1.numel()
if dim2 > 0:
assert torch.abs(gradB1).sum() > 0.0
assert torch.abs(gradB2).sum() > 0.0
else:
assert torch.abs(gradB1).sum() == 0.0
assert torch.abs(gradB2).sum() == 0.0
idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.1
idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)
assert (idx == 0).sum().item() <= n * 0.02
grad_err = (gradB1-gradB2).abs().mean()
assert grad_err.item() < 0.003
torch.testing.assert_close(
gradB1, gradB2, atol=0.18, rtol=0.3
)
===========changed ref 0===========
# module: tests.test_autograd
- funcs = [(torch.matmul, bnb.research.matmul_fp8_mixed), (torch.matmul, bnb.research.matmul_fp8_global)]
- str_funcs = ["matmul_fp8_mixed", 'matmul_fp8_global']
- req_grad = list(product([True, False], repeat=3))
- req_grad_str = []
- for c in req_grad:
- strval = ''
- for v in c:
- if v == True: strval += 'T'
- else: strval += 'F'
- req_grad_str.append(strval)
+ TRANSPOSE_VALS = [(False, True), (False, False)]
- transpose = [(False, True), (False, False)]
- str_transpose = ["NT", "NN"]
- dtype = [torch.float16, torch.float32]
- has_fp16_weights = [True, False]
- values = list(product(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose))
- str_values = list(product(dim1, dim2, dim3, dim4, str_funcs, dtype, req_grad_str, str_transpose))
- names = ["dim1_{}_dim2_{}_dim3_{}_dim4_{}_func_{}_dtype_{}_requires_grad_{}_transpose_{}".format(*vals) for vals in str_values]
-
===========changed ref 1===========
# module: tests.test_autograd
- n = 1
- k = 3
- dim1 = torch.randint(16, 64, size=(n,)).tolist()
- dim2 = torch.randint(32, 96, size=(n,)).tolist()
- dim3 = torch.randint(32, 96, size=(n,)).tolist()
- dim4 = torch.randint(32, 96, size=(n,)).tolist()
-
- dim2.append(0)
-
- funcs = [(torch.matmul, bnb.matmul_4bit)]
- str_funcs = ["matmul"]
- req_grad = list(product([True, False], repeat=3))
- req_grad_str = []
- for c in req_grad:
- strval = ''
- for v in c:
- if v == True: strval += 'T'
- else: strval += 'F'
- req_grad_str.append(strval)
-
- transpose = [(False, True), (False, False)]
- str_transpose = ["NT", "NN"]
- dtype = [torch.float16, torch.float32]
- compress_statistics = [False, True]
- has_fp16_weights = [True, False]
- has_bias = [True, False]
- quant_type = ['fp4', 'nf4']
- values = list(product(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, has_bias, compress_statistics, quant_type))
- str_values = list(product(dim1, dim2, dim3, dim4, str_funcs, dtype, req_grad_str, str_transpose, has_bias, compress_statistics, quant_type))
- names = ["dim1_{}_dim2_{}_dim3_{}_dim4_{}_func_{}_dtype_{}_requires_grad_{}_transpose_{}_has_bias_{}_compress_statistics_{}_quant_type_{}".format(*vals) for vals in str_values]
-
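A hedged sketch of the FP8 codebook setup used by test_matmul_fp8 above: one map for the forward pass and a wider-exponent map for the backward pass, both handed to the research matmul. The argument order (signed, exponent bits, precision bits, total bits) is inferred from the call sites above.

import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

A = torch.randn(16, 32, device="cuda", dtype=torch.float16)
B = torch.randn(48, 32, device="cuda", dtype=torch.float16)
torch.nn.init.xavier_uniform_(B)

fw_code = F.create_fp8_map(True, 4, 3, 8).to(A.device)  # forward: 4 exponent / 3 precision bits
bw_code = F.create_fp8_map(True, 5, 2, 8).to(A.device)  # backward: 5 exponent / 2 precision bits

out = bnb.research.matmul_fp8_mixed(A, B.t(), fw_code, bw_code)  # bw_code only matters for backward
assert out.shape == (16, 48)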
|
tests.test_functional/test_nvidia_transform
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
89876bb0e29e45c2823e33e2fceac623a00b4fd3
|
Merge pull request #876 from wkpark/minimal-win-fix
|
<0>:<add> if dims == 3 and orderOut != "col32":
<del> if dims == 3 and out_order != "col32":
<2>:<add> if dtype == torch.int32 and orderOut != "col32":
<del> if dtype == torch.int32 and out_order != "col32":
|
<s>
+ @pytest.mark.parametrize("transpose", [False], ids=id_formatter("transpose"))
+ @pytest.mark.parametrize("dims", [2, 3], ids=id_formatter("dims"))
- @pytest.mark.parametrize("dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",values,ids=names)
def test_nvidia_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
<0> if dims == 3 and out_order != "col32":
<1> return
<2> if dtype == torch.int32 and out_order != "col32":
<3> return
<4> try:
<5> func = F.get_transform_func(dtype, orderA, orderOut, transpose)
<6> except ValueError as ve:
<7> pytest.skip(str(ve)) # skip if not supported
<8>
<9> if dims == 2:
<10> A = torch.randint(-128, 127, size=(dim1, dim2), device="cuda").to(dtype)
<11> elif dims == 3:
<12> A = torch.randint(-128, 127, size=(dim1, dim2, dim3), device="cuda").to(
<13> dtype
<14> )
<15>
<16> out, S = F.nvidia_transform(A, to_order=orderOut)
<17>
<18> if orderOut == "row":
<19> torch.testing.assert_close(A.flatten(), out.flatten())
<20> elif orderOut == "col":
<21> torch.testing.assert_close(A.t().flatten(), out.flatten())
<22> elif orderOut == "col32":
<23> if dims == 2:
<24> n = A.shape[0] * (A.shape[1] + (32 - (A.shape[1] % 32)))
<25> elif dims == 3:
<26> n = (
<27> A.shape[0]
<28> * A.shape[1]
<29> * (A.shape[2] + (32 - (A.shape[2] %</s>
|
===========below chunk 0===========
<s>.mark.parametrize("transpose", [False], ids=id_formatter("transpose"))
+ @pytest.mark.parametrize("dims", [2, 3], ids=id_formatter("dims"))
- @pytest.mark.parametrize("dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",values,ids=names)
def test_nvidia_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
# offset: 1
)
assert out.numel() == n
elif orderOut == "col_turing":
# 32 col 8 row tiles
n = (A.shape[0] + (8 - A.shape[0] % 8)) * (
A.shape[1] + (32 - (A.shape[1] % 32))
)
assert out.numel() == n
total_coltile = (A.shape[1] // 32) + (1 if A.shape[1] % 32 != 0 else 0)
for row in range(A.shape[0]):
for col in range(A.shape[1]):
i = row * A.shape[1]
j = col
coltile = (col // 32) + (1 if col % 32 != 0 else 0)
rowtile = (
(row // 8) + (1 if row % 8 != 0 else 0)
) * total_coltile
offset = 32 * 8 * (rowtile + coltile)
col2 = col % 32
row2 = (row % 8) * 32
assert A.flatten()[i + j] == A[row, col]
# assert A.flatten()[i+j] == out.flatten()[row2+col2]
# torch.testing.assert_close(A.flatten()[i+j], A[row, col])
# torch.testing.assert_close(A.flatten()[i+j], out.flatten()[row2+ col2+block_offset])
if orderOut == "col32":
</s>
===========below chunk 1===========
<s>.mark.parametrize("transpose", [False], ids=id_formatter("transpose"))
+ @pytest.mark.parametrize("dims", [2, 3], ids=id_formatter("dims"))
- @pytest.mark.parametrize("dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose",values,ids=names)
def test_nvidia_transform(dim1, dim2, dim3, dims, dtype, orderA, orderOut, transpose):
# offset: 2
<s>+j], out.flatten()[row2+ col2+block_offset])
if orderOut == "col32":
out2, S = F.nvidia_transform(
out, from_order=orderOut, to_order="row", state=S
)
torch.testing.assert_close(A, out2)
===========changed ref 0===========
# module: tests.test_functional
- n = 1
- dim1 = torch.randint(1, 64, size=(n,)).tolist()
- dim2 = torch.randint(32, 128, size=(n,)).tolist()
- dim3 = torch.randint(32, 256, size=(n,)).tolist()
- values = list(product(dim1, dim2, dim3))
- names = ["dim1_{}_dim2_{}_dim3_{}".format(*vals) for vals in values]
-
===========changed ref 1===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(2, 256, size=(n,)).tolist()
- dim2 = torch.randint(2, 256, size=(n,)).tolist()
- dim3 = torch.randint(2, 256, size=(n,)).tolist()
- # dim1, dim2 = (256,), (256,)
- dtype = [torch.int8, torch.int32]
- a_order = ["row"]
- out_order = ["col", "row", "col32"]
- transpose = [False]
- dims = [2, 3]
- values = list(product(dim1, dim2, dim3, dims, dtype, a_order, out_order, transpose))
-
- names = ["dim1_{}_dim2_{}_dim3_{}_dims_{}_dtype_{}_orderA_{}_orderOut_{}_transpose_{}".format(*vals)for vals in values]
-
===========changed ref 2===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(1, 64, size=(n,)).tolist()
- dim2 = torch.randint(32, 128, size=(n,)).tolist()
- dim3 = torch.randint(32, 256, size=(n,)).tolist()
- dim4 = torch.randint(32, 256, size=(n,)).tolist()
- transpose = [(False, False), (True, False), (False, True), (True, True)]
- values = list(product(dim1, dim2, dim3, dim4, transpose))
- names = [
- "dim1_{}_dim2_{}_dim3_{}_dim4_{}_transpose_{}".format(*vals)
- for vals in values
- ]
-
===========changed ref 3===========
# module: tests.test_functional
- n = 2
- seq_dim = torch.randint(32, 512, size=(n,)).tolist()
- hidden_dim = torch.randint(32, 1024 * 4, size=(n,)).tolist()
- batch_dim = torch.randint(2, 16, size=(n,)).tolist()
- transpose = [False, True]
- values = list(product(seq_dim, hidden_dim, batch_dim, transpose))
- names = [
- "seq_dim={}_hidden_dim={}_batch_dim={}_transpose{}".format(*vals)
- for vals in values
- ]
-
===========changed ref 4===========
# module: tests.test_functional
- n = 3
- seq_dim = torch.randint(32, 512, size=(n,)).tolist()
- hidden_dim = torch.randint(32, 1024 * 4, size=(n,)).tolist()
- batch_dim = torch.randint(2, 16, size=(n,)).tolist()
- values = list(product(seq_dim, hidden_dim, batch_dim))
- names = [
- "seq_dim{}_hidden_dim{}_batch_dim{}".format(*vals) for vals in values
- ]
-
===========changed ref 5===========
# module: tests.test_functional
- n = 2
- hidden_dim = torch.randint(32, 256, size=(n,)).tolist()
- batch_dim = torch.randint(16, 256, size=(n,)).tolist()
- seq_dim = torch.randint(16, 256, size=(n,)).tolist()
- transpose = [(False, False), (False, True), (True, False), (True, True)]
- values = list(product(hidden_dim, batch_dim, transpose, seq_dim))
- names = [
- "hidden_dim_{}_batch_dim_{},transpose_{}_seq_dim_{}".format(*vals)
- for vals in values
- ]
-
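A hedged sketch of the col32 size check in test_nvidia_transform above: the output of a row-to-col32 transform is padded so the column count becomes a multiple of 32, and the test predicts the element count with the expression below. As written, the expression always adds a full 32-column tile, so an input whose width is already a multiple of 32 is padded by another tile.

def col32_numel_2d(rows: int, cols: int) -> int:
    # mirrors n = A.shape[0] * (A.shape[1] + (32 - (A.shape[1] % 32))) from the test
    return rows * (cols + (32 - cols % 32))

assert col32_numel_2d(5, 40) == 5 * 64   # 40 columns padded up to 64
assert col32_numel_2d(5, 64) == 5 * 96   # 64 columns padded to 96 under this expression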
|
tests.test_functional/test_row_scale_bench
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
89876bb0e29e45c2823e33e2fceac623a00b4fd3
|
Merge pull request #876 from wkpark/minimal-win-fix
|
<0>:<add> formatB = F.get_special_format_str()
|
<s>88, id="1024, 12288*4, 12288"),
+ pytest.param(2048, 4096 * 4, 4096, id="2048, 4096*4, 4096"),
+ ],
+ )
- @pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
@pytest.mark.skip("Row scale has some bugs for ampere")
+ @pytest.mark.benchmark
def test_row_scale_bench(dim1, dim4, inner):
<0> err1, err2, err3 = [], [], []
<1> relerr1, relerr2 = [], []
<2> scale = 1
<3> A = torch.randn(dim1, inner, device="cuda").half()
<4> B = torch.randn(dim4, inner, device="cuda").half()
<5> torch.nn.init.xavier_uniform_(B)
<6> # warmup
<7> for i in range(k):
<8> C1 = torch.matmul(A, B.t())
<9>
<10> torch.cuda.synchronize()
<11> t0 = time.time()
<12> for i in range(k):
<13> C1 = torch.matmul(A, B.t())
<14> torch.cuda.synchronize()
<15> print("16", time.time() - t0)
<16>
<17> C1a, C1b, stats1a, stats1b, coo_tensor = F.double_quant(A)
<18> CB, absmaxB = F.vectorwise_quant(B, quant_type="linear")
<19> A2, SA = F.nvidia_transform(C1a, "col32")
<20> B2, SB = F.nvidia_transform(CB, formatB)
<21> A1, maxA = F.vectorwise_quant(A, dim=1)
<22>
<23> c = 10.0 * inner * scale
<24> row_scale = maxA / c
<25> torch.cuda.synchronize()
<26> t0 = time.time()</s>
|
===========below chunk 0===========
<s>24, 12288*4, 12288"),
+ pytest.param(2048, 4096 * 4, 4096, id="2048, 4096*4, 4096"),
+ ],
+ )
- @pytest.mark.parametrize("dim1, dim4, inner", values, ids=names)
@pytest.mark.skip("Row scale has some bugs for ampere")
+ @pytest.mark.benchmark
def test_row_scale_bench(dim1, dim4, inner):
# offset: 1
outC32, SC = F.igemmlt(
A2, B2, SA, SB, dtype=torch.int8, row_scale=row_scale
)
torch.cuda.synchronize()
print("row-wise", time.time() - t0)
C2a, C2b, stats2a, stats2b, coo_tensor = F.double_quant(B)
B2, SB = F.nvidia_transform(C2a, formatB)
torch.cuda.synchronize()
t0 = time.time()
for i in range(k):
outC32, SC = F.igemmlt(A2, B2, SA, SB)
torch.cuda.synchronize()
print("vector-wise", time.time() - t0)
===========changed ref 0===========
# module: tests.test_functional
- dim1 = [1024, 2048]
- inner = [12288 * 4, 4096 * 4]
- dim4 = [12288, 4096]
-
- values = list(zip(dim1, dim4, inner))
- names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
-
===========changed ref 1===========
# module: tests.test_functional
- n = 6
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()
-
- values = list(zip(dim1, dim4, inner))
- names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
-
===========changed ref 2===========
# module: tests.test_functional
- n = 4
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()
-
- values = list(zip(dim1, dim4, inner))
- names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
-
===========changed ref 3===========
# module: tests.test_functional
- n = 2
- # dim1 = [8*1024]
- # dim2 = [4*1024]
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
-
- values = list(product(dim1, dim2))
- names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
-
===========changed ref 4===========
# module: tests.test_functional
- n = 2
- dim1 = [1 * 1024]
- dim2 = [1 * 1024]
- # dim1 = torch.randint(1,4*1024, size=(n,)).tolist()
- # dim2 = torch.randint(1,4*1024, size=(n,)).tolist()
-
- dims = (2,)
- # ldb = list(range(256, 1*1024, 256))
- values = list(product(dim1, dim2, dims))
- names = ["dim1_{}_dim2_{}_dims_{}".format(*vals) for vals in values]
-
===========changed ref 5===========
# module: tests.test_functional
- dim1 = [32]
- dim2 = [32]
- dim3 = [32]
- dim4 = [32]
-
- dims = (2,)
- # ldb = list(range(256, 1*1024, 256))
- values = list(product(dim1, dim2, dim3, dim4, dims))
- names = [
- "dim1_{}_dim2_{}_dim3_{}_dim4_{}_dims_{}".format(*vals)
- for vals in values
- ]
-
===========changed ref 6===========
# module: tests.test_functional
- n = 1
- dim1 = torch.randint(1, 64, size=(n,)).tolist()
- dim2 = torch.randint(32, 128, size=(n,)).tolist()
- dim3 = torch.randint(32, 256, size=(n,)).tolist()
- values = list(product(dim1, dim2, dim3))
- names = ["dim1_{}_dim2_{}_dim3_{}".format(*vals) for vals in values]
-
===========changed ref 7===========
# module: tests.test_functional
- n = 1
- dim1 = torch.randint(1, 256, size=(n,)).tolist()
- dim2 = torch.randint(32, 512, size=(n,)).tolist()
- dim3 = torch.randint(32, 1024, size=(n,)).tolist()
- dim4 = torch.randint(32, 1024, size=(n,)).tolist()
-
- # dim1 = [2]
- # dim2 = [2]
- # dim3 = [2]
- # dim4 = [2]
-
- dims = (2, 3)
- ldb = [0]
- # ldb = list(range(256, 1*1024, 256))
- values = list(product(dim1, dim2, dim3, dim4, dims, ldb))
- names = [
- "dim1_{}_dim2_{}_dim3_{}_dim4_{}_dims_{}_ldb_{}".format(*vals)
- for vals in values
- ]
-
===========changed ref 8===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(2, 256, size=(n,)).tolist()
- dim2 = torch.randint(2, 256, size=(n,)).tolist()
- dim3 = torch.randint(2, 256, size=(n,)).tolist()
- # dim1, dim2 = (256,), (256,)
- dtype = [torch.int8, torch.int32]
- a_order = ["row"]
- out_order = ["col", "row", "col32"]
- transpose = [False]
- dims = [2, 3]
- values = list(product(dim1, dim2, dim3, dims, dtype, a_order, out_order, transpose))
-
- names = ["dim1_{}_dim2_{}_dim3_{}_dims_{}_dtype_{}_orderA_{}_orderOut_{}_transpose_{}".format(*vals)for vals in values]
-
===========changed ref 9===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(1, 64, size=(n,)).tolist()
- dim2 = torch.randint(32, 128, size=(n,)).tolist()
- dim3 = torch.randint(32, 256, size=(n,)).tolist()
- dim4 = torch.randint(32, 256, size=(n,)).tolist()
- transpose = [(False, False), (True, False), (False, True), (True, True)]
- values = list(product(dim1, dim2, dim3, dim4, transpose))
- names = [
- "dim1_{}_dim2_{}_dim3_{}_dim4_{}_transpose_{}".format(*vals)
- for vals in values
- ]
-
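A hedged sketch of the timing pattern used by test_row_scale_bench and the other benchmark tests above: warm up the kernels, synchronize before and after the timed loop, and report wall-clock time. The helper name and iteration count are illustrative.

import time
import torch

def bench_matmul(A: torch.Tensor, B: torch.Tensor, iters: int = 10) -> float:
    for _ in range(iters):           # warmup, so one-off setup cost is excluded
        torch.matmul(A, B.t())
    torch.cuda.synchronize()
    t0 = time.time()
    for _ in range(iters):
        torch.matmul(A, B.t())
    torch.cuda.synchronize()         # wait for queued kernels before reading the clock
    return time.time() - t0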
|
tests.test_functional/test_gemv_eye_4bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
89876bb0e29e45c2823e33e2fceac623a00b4fd3
|
Merge pull request #876 from wkpark/minimal-win-fix
|
<2>:<add> dims = get_test_dims(0, 8192, n=dims)
<del> dims = torch.randint(0, 8192, size=(dims,)).tolist()
|
<s>.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=describe_dtype)
- @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
@pytest.mark.parametrize("double_quant", [False], ids=['DQ_True'])
def test_gemv_eye_4bit(storage_type, dtype, double_quant):
<0> dims = 10
<1> torch.random.manual_seed(np.random.randint(0, 412424242))
<2> dims = torch.randint(0, 8192, size=(dims,)).tolist()
<3> dims = [dim + (64-(dim % 64)) for dim in dims]
<4> #for dim in [576, 5120, 3520, 5184, 1280, 4992, 5312, 2048]:
<5> for dim in dims:
<6> A = torch.normal(0, 0.1, size=(1, 1, dim), dtype=dtype, device='cuda')
<7> B = torch.eye(dim, dtype=dtype, device='cuda')
<8>
<9> qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=double_quant)
<10> C3 = torch.matmul(A, B.t())
<11> C2 = bnb.matmul_4bit(A, qB.t(), state)
<12> A.requires_grad = True
<13> C1 = bnb.matmul_4bit(A, qB.t(), state)
<14>
<15> torch.testing.assert_close(A, C3)
<16> torch.testing.assert_close(A, C1)
<17> torch.testing.assert_close(A, C2)
<18>
|
===========changed ref 0===========
+ # module: tests.helpers
+ test_dims_rng = random.Random(42)
+
===========changed ref 1===========
# module: tests.test_functional
- n = 2
- # dim1 = torch.randint(1,1*1024, size=(n,)).tolist()
- # dim2 = torch.randint(1,4*1024, size=(n,)).tolist()
- dim1 = [1 * 2048]
- # dim2 = [12288]
- dim2 = [2048]
- # dim1 = [2]
- # dim2 = [2]
- dtype = [torch.int8]
- values = list(product(dim1, dim2, dtype))
- names = ["dim1_{}_dim2_{}_dtype_{}".format(*vals) for vals in values]
-
===========changed ref 2===========
+ # module: tests.helpers
+ def describe_dtype(dtype: torch.dtype) -> str:
+ return DTYPE_NAMES.get(dtype) or str(dtype).rpartition(".")[2]
+
===========changed ref 3===========
+ # module: tests.helpers
+ def get_test_dims(min: int, max: int, *, n: int) -> list[int]:
+ return [test_dims_rng.randint(min, max) for _ in range(n)]
+
===========changed ref 4===========
+ # module: tests.helpers
+ def id_formatter(label: str):
+ """
+ Return a function that formats the value given to it with the given label.
+ """
+ return lambda value: format_with_label(label, value)
+
===========changed ref 5===========
# module: tests.test_functional
- batch_size = 1
- seqdim = 1
- values = []
- #values.append((batch_size, seqdim, 768, 4 * 768))
- #values.append((batch_size, seqdim, 1024, 4*1024))
- #values.append((batch_size, seqdim, 1536, 4*1536))
- #values.append((batch_size, seqdim, 2048, 4*2048))
- #values.append((batch_size, seqdim, 2560, 4*2560))
- #values.append((batch_size, seqdim, 4096, 4*4096))
- #values.append((batch_size, seqdim, 5120, 4*5120))
- values.append((batch_size, seqdim, 6656, 4*6656))
- #values.append((batch_size, seqdim, 8192, 4*8192))
- #values.append((batch_size, seqdim, 5140, 4*5140))
- #values.append((batch_size, seqdim, 12288, 4*12288))
- names = ["batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values]
+ ),
+ "vectorwise": (quant_multi, quant_multi, dequant, dequant, mm_dequant),
+ }
===========changed ref 6===========
+ # module: tests.helpers
+ TRUE_FALSE = (True, False)
+ BOOLEAN_TRIPLES = list(
+ product(TRUE_FALSE, repeat=3)
+ ) # all combinations of (bool, bool, bool)
+ BOOLEAN_TUPLES = list(product(TRUE_FALSE, repeat=2)) # all combinations of (bool, bool)
+
===========changed ref 7===========
# module: tests.test_functional
- dim1 = [1024, 2048]
- inner = [12288 * 4, 4096 * 4]
- dim4 = [12288, 4096]
-
- values = list(zip(dim1, dim4, inner))
- names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
-
===========changed ref 8===========
# module: tests.test_optim
- dim1 = [1024]
- dim2 = [32, 1024, 4097]
- gtype = [torch.float32, torch.float16]
- values = list(product(dim1, dim2, gtype))
- names = ["dim1_{}_dim2_{}_gtype_{}".format(*vals) for vals in values]
-
===========changed ref 9===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
- values = list(product(dim1, dim2))
- names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
-
===========changed ref 10===========
+ # module: tests.helpers
+ def format_with_label(label: str, value: Any) -> str:
+ if isinstance(value, bool):
+ formatted = "T" if value else "F"
+ elif isinstance(value, (list, tuple)) and all(isinstance(v, bool) for v in value):
+ formatted = "".join("T" if b else "F" for b in value)
+ else:
+ formatted = str(value)
+ return f"{label}={formatted}"
+
===========changed ref 11===========
+ # module: tests.helpers
+ DTYPE_NAMES = {
+ torch.bfloat16: "bf16",
+ torch.bool: "bool",
+ torch.float16: "fp16",
+ torch.float32: "fp32",
+ torch.float64: "fp64",
+ torch.int32: "int32",
+ torch.int64: "int64",
+ torch.int8: "int8",
+ }
+
===========changed ref 12===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- # dim1 = [4]
- # dim2 = [5]
-
- values = list(product(dim1, dim2))
- names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
-
===========changed ref 13===========
# module: tests.test_functional
- n = 2
- # dim1 = [8*1024]
- # dim2 = [4*1024]
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
-
- values = list(product(dim1, dim2))
- names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
-
===========changed ref 14===========
# module: tests.test_functional
- n = 1
- dim1 = torch.randint(1, 64, size=(n,)).tolist()
- dim2 = torch.randint(32, 128, size=(n,)).tolist()
- dim3 = torch.randint(32, 256, size=(n,)).tolist()
- values = list(product(dim1, dim2, dim3))
- names = ["dim1_{}_dim2_{}_dim3_{}".format(*vals) for vals in values]
-
===========changed ref 15===========
# module: tests.test_functional
- n = 6
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()
-
- values = list(zip(dim1, dim4, inner))
- names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
-
===========changed ref 16===========
# module: tests.test_functional
- n = 4
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()
-
- values = list(zip(dim1, dim4, inner))
- names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
-
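A hedged sketch of how the tests.helpers utilities shown in the changed refs above behave, based on their definitions; useful when reading the generated pytest ids.

import torch
from tests.helpers import describe_dtype, format_with_label, get_test_dims, id_formatter

assert format_with_label("has_bias", True) == "has_bias=T"
assert format_with_label("req_grad", (True, False, True)) == "req_grad=TFT"
assert id_formatter("dim1")(64) == "dim1=64"
assert describe_dtype(torch.bfloat16) == "bf16"

# get_test_dims draws from a module-level RNG seeded with 42, so the generated
# dimensions are reproducible across runs, unlike the torch.randint lists they replace.
dims = get_test_dims(0, 8192, n=10)
assert len(dims) == 10 and all(0 <= d <= 8192 for d in dims)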
|
tests.conftest/pytest_runtest_call
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
89876bb0e29e45c2823e33e2fceac623a00b4fd3
|
Merge pull request #876 from wkpark/minimal-win-fix
|
<2>:<add> except NotImplementedError as nie:
<add> if "NO_CUBLASLT" in str(nie):
<add> pytest.skip("CUBLASLT not available")
<add> raise
|
# module: tests.conftest
def pytest_runtest_call(item):
<0> try:
<1> item.runtest()
<2> except AssertionError as ae:
<3> if str(ae) == "Torch not compiled with CUDA enabled":
<4> pytest.skip("Torch not compiled with CUDA enabled")
<5> raise
<6>
|
===========unchanged ref 0===========
at: _pytest.outcomes
_WithException(*, allow_module_level: bool=False, msg: Optional[str]=None) -> NoReturn
===========changed ref 0===========
+ # module: tests.helpers
+ test_dims_rng = random.Random(42)
+
===========changed ref 1===========
+ # module: tests.helpers
+ def describe_dtype(dtype: torch.dtype) -> str:
+ return DTYPE_NAMES.get(dtype) or str(dtype).rpartition(".")[2]
+
===========changed ref 2===========
+ # module: tests.helpers
+ def get_test_dims(min: int, max: int, *, n: int) -> list[int]:
+ return [test_dims_rng.randint(min, max) for _ in range(n)]
+
===========changed ref 3===========
+ # module: tests.helpers
+ def id_formatter(label: str):
+ """
+ Return a function that formats the value given to it with the given label.
+ """
+ return lambda value: format_with_label(label, value)
+
===========changed ref 4===========
+ # module: tests.helpers
+ TRUE_FALSE = (True, False)
+ BOOLEAN_TRIPLES = list(
+ product(TRUE_FALSE, repeat=3)
+ ) # all combinations of (bool, bool, bool)
+ BOOLEAN_TUPLES = list(product(TRUE_FALSE, repeat=2)) # all combinations of (bool, bool)
+
===========changed ref 5===========
# module: tests.test_functional
- dim1 = [1024, 2048]
- inner = [12288 * 4, 4096 * 4]
- dim4 = [12288, 4096]
-
- values = list(zip(dim1, dim4, inner))
- names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
-
===========changed ref 6===========
# module: tests.test_optim
- dim1 = [1024]
- dim2 = [32, 1024, 4097]
- gtype = [torch.float32, torch.float16]
- values = list(product(dim1, dim2, gtype))
- names = ["dim1_{}_dim2_{}_gtype_{}".format(*vals) for vals in values]
-
===========changed ref 7===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
- values = list(product(dim1, dim2))
- names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
-
===========changed ref 8===========
+ # module: tests.helpers
+ def format_with_label(label: str, value: Any) -> str:
+ if isinstance(value, bool):
+ formatted = "T" if value else "F"
+ elif isinstance(value, (list, tuple)) and all(isinstance(v, bool) for v in value):
+ formatted = "".join("T" if b else "F" for b in value)
+ else:
+ formatted = str(value)
+ return f"{label}={formatted}"
+
===========changed ref 9===========
+ # module: tests.helpers
+ DTYPE_NAMES = {
+ torch.bfloat16: "bf16",
+ torch.bool: "bool",
+ torch.float16: "fp16",
+ torch.float32: "fp32",
+ torch.float64: "fp64",
+ torch.int32: "int32",
+ torch.int64: "int64",
+ torch.int8: "int8",
+ }
+
===========changed ref 10===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- # dim1 = [4]
- # dim2 = [5]
-
- values = list(product(dim1, dim2))
- names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
-
===========changed ref 11===========
# module: tests.test_functional
- n = 2
- # dim1 = [8*1024]
- # dim2 = [4*1024]
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
-
- values = list(product(dim1, dim2))
- names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
-
===========changed ref 12===========
# module: tests.test_functional
- n = 1
- dim1 = torch.randint(1, 64, size=(n,)).tolist()
- dim2 = torch.randint(32, 128, size=(n,)).tolist()
- dim3 = torch.randint(32, 256, size=(n,)).tolist()
- values = list(product(dim1, dim2, dim3))
- names = ["dim1_{}_dim2_{}_dim3_{}".format(*vals) for vals in values]
-
===========changed ref 13===========
# module: tests.test_functional
- n = 6
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()
-
- values = list(zip(dim1, dim4, inner))
- names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
-
===========changed ref 14===========
# module: tests.test_functional
- n = 4
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim4 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- inner = torch.randint(1, 4 * 1024, size=(n,)).tolist()
-
- values = list(zip(dim1, dim4, inner))
- names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
-
===========changed ref 15===========
# module: tests.test_functional
- dim1 = [32]
- dim2 = [32]
- dim3 = [32]
- dim4 = [32]
-
- dims = (2,)
- # ldb = list(range(256, 1*1024, 256))
- values = list(product(dim1, dim2, dim3, dim4, dims))
- names = [
- "dim1_{}_dim2_{}_dim3_{}_dim4_{}_dims_{}".format(*vals)
- for vals in values
- ]
-
===========changed ref 16===========
# module: tests.test_functional
- n = 3
- seq_dim = torch.randint(32, 512, size=(n,)).tolist()
- hidden_dim = torch.randint(32, 1024 * 4, size=(n,)).tolist()
- batch_dim = torch.randint(2, 16, size=(n,)).tolist()
- values = list(product(seq_dim, hidden_dim, batch_dim))
- names = [
- "seq_dim{}_hidden_dim{}_batch_dim{}".format(*vals) for vals in values
- ]
-
===========changed ref 17===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(1, 1 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(1, 1 * 1024, size=(n,)).tolist()
- # dim1 = [7]
- # dim2 = [11]
- transposed_B = [False, True]
- values = list(product(dim1, dim2, transposed_B))
- names = ["dim1_{}_dim2_{}_transposed_B_{}".format(*vals) for vals in values]
-
|
tests.test_generation/test_pi
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
89876bb0e29e45c2823e33e2fceac623a00b4fd3
|
Merge pull request #876 from wkpark/minimal-win-fix
|
<0>:<del> print('')
<1>:<del> dtype = torch.float16
<2>:<del>
|
<s>kernel", [True, False], ids=['inference_kernel_True', 'inference_kernel_False'])
- #@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ def test_pi(requires_cuda, model_and_tokenizer, inference_kernel, DQ, dtype):
- def test_pi(requires_cuda, model_and_tokenizer, inference_kernel, DQ):
<0> print('')
<1> dtype = torch.float16
<2>
<3> fixture_config, model, tokenizer = model_and_tokenizer
<4>
<5> generation_config = transformers.GenerationConfig(
<6> max_new_tokens=20,
<7> do_sample=True,
<8> top_p=0.9,
<9> temperature=0.7,
<10> )
<11> generation_config.max_new_tokens = 20
<12>
<13>
<14> #text = 'Please write down the first 50 digits of pi.'
<15> #text = get_prompt_for_generation_eval(text)
<16> #text += ' Sure, here the first 50 digits of pi: 3.14159'
<17> n_cases = 6
<18> text = '3.14159'
<19> if hasattr(model.config, 'quantization_config'):
<20> model.config.quantization_config.bnb_4bit_compute_dtype = dtype
<21> model.config.quantization_config.bnb_4bit_use_double_quant = DQ
<22>
<23> if not inference_kernel:
<24> text = [text]*n_cases
<25> inputs = tokenizer(text, return_tensors="pt").to('cuda:0')
<26> x = inputs['input_ids']
<27> outputs = []
<28> if inference_kernel:
<29> for i in range(n_cases):
<30> output = model.generate(x, generation_config=generation_config)
<31> textout = tokenizer.decode(output[0], skip_special_tokens=True)
<32> outputs.append(text</s>
|
===========below chunk 0===========
<s> False], ids=['inference_kernel_True', 'inference_kernel_False'])
- #@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=['fp16', 'bf16', 'fp32'])
+ def test_pi(requires_cuda, model_and_tokenizer, inference_kernel, DQ, dtype):
- def test_pi(requires_cuda, model_and_tokenizer, inference_kernel, DQ):
# offset: 1
else:
outputs = model.generate(x, generation_config=generation_config)
outputs = [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
assert len(outputs) == n_cases
failure_count = 0
for i in range(n_cases):
if not outputs[i][:len(str(math.pi))] == str(math.pi):
failure_count += 1
failure_max = (2 if fixture_config[0] == 'huggyllama/llama-7b' else 4)
if failure_count > failure_max:
print(math.pi)
for out in outputs:
print(out)
raise ValueError(f'Failure count: {failure_count}/{n_cases}')
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: math
pi: float
at: tests.conftest
requires_cuda() -> bool
at: tests.helpers
id_formatter(label: str)
describe_dtype(dtype: torch.dtype) -> str
TRUE_FALSE = (True, False)
at: torch._C
float16: dtype = ...
at: transformers.generation.configuration_utils
GenerationConfig(**kwargs)
at: transformers.generation.configuration_utils.GenerationConfig.__init__
self.max_new_tokens = kwargs.pop("max_new_tokens", None)
===========changed ref 0===========
+ # module: tests.helpers
+ def describe_dtype(dtype: torch.dtype) -> str:
+ return DTYPE_NAMES.get(dtype) or str(dtype).rpartition(".")[2]
+
===========changed ref 1===========
+ # module: tests.helpers
+ def id_formatter(label: str):
+ """
+ Return a function that formats the value given to it with the given label.
+ """
+ return lambda value: format_with_label(label, value)
+
===========changed ref 2===========
# module: tests.test_generation
models = ['huggyllama/llama-7b', 'bigscience/bloom-1b7']
dtypes = ['nf4', 'fp4']
- load_in_4bit = [True, False]
- values = list(product(models, dtypes))
- strfunc = lambda lst: [str(x) for x in lst]
- ids = ['_'.join(strfunc(x)) for x in values]
===========changed ref 3===========
+ # module: tests.helpers
+ test_dims_rng = random.Random(42)
+
===========changed ref 4===========
+ # module: tests.helpers
+ def get_test_dims(min: int, max: int, *, n: int) -> list[int]:
+ return [test_dims_rng.randint(min, max) for _ in range(n)]
+
===========changed ref 5===========
+ # module: tests.helpers
+ TRUE_FALSE = (True, False)
+ BOOLEAN_TRIPLES = list(
+ product(TRUE_FALSE, repeat=3)
+ ) # all combinations of (bool, bool, bool)
+ BOOLEAN_TUPLES = list(product(TRUE_FALSE, repeat=2)) # all combinations of (bool, bool)
+
===========changed ref 6===========
# module: tests.test_functional
- dim1 = [1024, 2048]
- inner = [12288 * 4, 4096 * 4]
- dim4 = [12288, 4096]
-
- values = list(zip(dim1, dim4, inner))
- names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values]
-
===========changed ref 7===========
# module: tests.test_optim
- dim1 = [1024]
- dim2 = [32, 1024, 4097]
- gtype = [torch.float32, torch.float16]
- values = list(product(dim1, dim2, gtype))
- names = ["dim1_{}_dim2_{}_gtype_{}".format(*vals) for vals in values]
-
===========changed ref 8===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(256, 1 * 1024, size=(n,)).tolist()
- values = list(product(dim1, dim2))
- names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
-
===========changed ref 9===========
+ # module: tests.helpers
+ def format_with_label(label: str, value: Any) -> str:
+ if isinstance(value, bool):
+ formatted = "T" if value else "F"
+ elif isinstance(value, (list, tuple)) and all(isinstance(v, bool) for v in value):
+ formatted = "".join("T" if b else "F" for b in value)
+ else:
+ formatted = str(value)
+ return f"{label}={formatted}"
+
===========changed ref 10===========
+ # module: tests.helpers
+ DTYPE_NAMES = {
+ torch.bfloat16: "bf16",
+ torch.bool: "bool",
+ torch.float16: "fp16",
+ torch.float32: "fp32",
+ torch.float64: "fp64",
+ torch.int32: "int32",
+ torch.int64: "int64",
+ torch.int8: "int8",
+ }
+
===========changed ref 11===========
# module: tests.conftest
def pytest_runtest_call(item):
try:
item.runtest()
+ except NotImplementedError as nie:
+ if "NO_CUBLASLT" in str(nie):
+ pytest.skip("CUBLASLT not available")
+ raise
except AssertionError as ae:
if str(ae) == "Torch not compiled with CUDA enabled":
pytest.skip("Torch not compiled with CUDA enabled")
raise
===========changed ref 12===========
# module: tests.test_functional
- n = 2
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- # dim1 = [4]
- # dim2 = [5]
-
- values = list(product(dim1, dim2))
- names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
-
===========changed ref 13===========
# module: tests.test_functional
- n = 2
- # dim1 = [8*1024]
- # dim2 = [4*1024]
- dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
- dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist()
-
- values = list(product(dim1, dim2))
- names = ["dim1_{}_dim2_{}".format(*vals) for vals in values]
-
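A hedged sketch of the scoring logic in test_pi above: each generation must start with the digits of math.pi, and a small, model-dependent number of failures (2 for llama-7b, 4 otherwise) is tolerated. The helper below restates the prefix check in isolation.

import math

def count_pi_failures(outputs: list) -> int:
    target = str(math.pi)  # '3.141592653589793'
    return sum(1 for out in outputs if out[:len(target)] != target)

assert count_pi_failures(["3.14159265358979323846...", "3.15 is close enough"]) == 1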
|
bitsandbytes.cuda_setup.main/CUDASetup.manual_override
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
259ad44110940259b6a6b545b8a8b2a69289bbef
|
CUDA setup cleanup (#996)
|
<0>:<add> if not torch.cuda.is_available():
<del> if torch.cuda.is_available():
<1>:<del> if 'BNB_CUDA_VERSION' in os.environ:
<2>:<del> if len(os.environ['BNB_CUDA_VERSION']) > 0:
<3>:<del> warn(
<4>:<del> f'\n\n{"=" * 80}\n'
<5>:<del> 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
<6>:<del> 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
<7>:<del> 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
<8>:<del> 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
<9>:<del> 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
<10>:<del> f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
<11>:<del> f'\n{"=" * 80}\n\n'
<12>:<del> )
<13>:<del> binary_name = self.binary_name.rsplit(".", 1)[0]
<14>:<del> suffix = ".so" if os.name != "nt" else ".dll"
<15>:<del> self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
<16>:<add> return
<add> override_value = os.environ.get('BNB_CUDA_VERSION')
<add> if not override_value:
<add> return
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def manual_override(self):
<0> if torch.cuda.is_available():
<1> if 'BNB_CUDA_VERSION' in os.environ:
<2> if len(os.environ['BNB_CUDA_VERSION']) > 0:
<3> warn(
<4> f'\n\n{"=" * 80}\n'
<5> 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
<6> 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
<7> 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
<8> 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
<9> 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
<10> f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
<11> f'\n{"=" * 80}\n\n'
<12> )
<13> binary_name = self.binary_name.rsplit(".", 1)[0]
<14> suffix = ".so" if os.name != "nt" else ".dll"
<15> self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
<16>
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
- # these are the most common libs names
- # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
- # we have libcudart.so.11.0 which causes a lot of errors before
- # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
- system = platform.system()
- if system == 'Windows':
+ if platform.system() == 'Windows': # Windows
CUDA_RUNTIME_LIBS = ["nvcuda.dll"]
+ DYNAMIC_LIBRARY_SUFFIX = ".dll"
+ else: # Linux or other
- else: # Linux or other
+ # these are the most common libs names
+ # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
+ # we have libcudart.so.11.0 which causes a lot of errors before
+ # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ CUDA_RUNTIME_LIBS = ["libcudart.so", "libcudart.so.11.0", "libcudart.so.12.0", "libcudart.so.12.1", "libcudart.so.12.2"]
- CUDA_RUNTIME_LIBS = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
+ DYNAMIC_LIBRARY_SUFFIX = ".so"
- # this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
- backup_paths = []
- backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
-
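A hedged sketch of the guard-clause shape the diff above gives manual_override: return early when CUDA is unavailable or when no BNB_CUDA_VERSION override is set, then act on the value. Only the early returns appear in this record; the body below, which splices the override into the library filename, is illustrative rather than the actual upstream implementation.

import os

DYNAMIC_LIBRARY_SUFFIX = ".so"  # ".dll" on Windows, per the module-level selection above

def apply_cuda_override(binary_name: str, cuda_available: bool) -> str:
    if not cuda_available:
        return binary_name
    override_value = os.environ.get("BNB_CUDA_VERSION")
    if not override_value:
        return binary_name
    # e.g. libbitsandbytes_cuda117.so with BNB_CUDA_VERSION=122 -> libbitsandbytes_cuda122.so
    stem = binary_name.removesuffix(DYNAMIC_LIBRARY_SUFFIX)
    return f"{stem[:-3]}{override_value}{DYNAMIC_LIBRARY_SUFFIX}"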
|
bitsandbytes.cuda_setup.main/CUDASetup.run_cuda_setup
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
259ad44110940259b6a6b545b8a8b2a69289bbef
|
CUDA setup cleanup (#996)
|
<14>:<del> suffix = ".so" if os.name != "nt" else ".dll"
<18>:<add> legacy_binary_name = f"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}"
<del> legacy_binary_name = f"libbitsandbytes_cpu{suffix}"
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
<0> self.initialized = True
<1> self.cuda_setup_log = []
<2>
<3> binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
<4> self.cudart_path = cudart_path
<5> self.cuda_available = torch.cuda.is_available()
<6> self.cc = cc
<7> self.cuda_version_string = cuda_version_string
<8> self.binary_name = binary_name
<9> self.manual_override()
<10>
<11> package_dir = Path(__file__).parent.parent
<12> binary_path = package_dir / self.binary_name
<13>
<14> suffix = ".so" if os.name != "nt" else ".dll"
<15> try:
<16> if not binary_path.exists():
<17> self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
<18> legacy_binary_name = f"libbitsandbytes_cpu{suffix}"
<19> self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
<20> binary_path = package_dir / legacy_binary_name
<21> if not binary_path.exists() or torch.cuda.is_available():
<22> self.add_log_entry('')
<23> self.add_log_entry('='*48 + 'ERROR' + '='*37)
<24> self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
<25> self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
<26> '"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
<27> self.add_log_entry('2. CUDA driver not installed')
<28> </s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
self.add_log_entry('4. You have multiple conflicting CUDA libraries')
self.add_log_entry('5. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(str(binary_path))
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path!s}...")
self.lib = ct.cdll.LoadLibrary(str(binary_path))
except Exception as ex:
self.add_log_entry(str(ex))
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def manual_override(self):
+ if not torch.cuda.is_available():
- if torch.cuda.is_available():
- if 'BNB_CUDA_VERSION' in os.environ:
- if len(os.environ['BNB_CUDA_VERSION']) > 0:
- warn(
- f'\n\n{"=" * 80}\n'
- 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
- 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
- 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
- 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
- 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
- f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
- f'\n{"=" * 80}\n\n'
- )
- binary_name = self.binary_name.rsplit(".", 1)[0]
- suffix = ".so" if os.name != "nt" else ".dll"
- self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
+ return
+ override_value = os.environ.get('BNB_CUDA_VERSION')
+ if not override_value:
+ return
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
- # these are the most common libs names
- # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
- # we have libcudart.so.11.0 which causes a lot of errors before
- # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
- system = platform.system()
- if system == 'Windows':
+ if platform.system() == 'Windows': # Windows
CUDA_RUNTIME_LIBS = ["nvcuda.dll"]
+ DYNAMIC_LIBRARY_SUFFIX = ".dll"
+ else: # Linux or other
- else: # Linux or other
+ # these are the most common libs names
+ # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
+ # we have libcudart.so.11.0 which causes a lot of errors before
+ # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ CUDA_RUNTIME_LIBS = ["libcudart.so", "libcudart.so.11.0", "libcudart.so.12.0", "libcudart.so.12.1", "libcudart.so.12.2"]
- CUDA_RUNTIME_LIBS = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
+ DYNAMIC_LIBRARY_SUFFIX = ".so"
- # this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
- backup_paths = []
- backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
-
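A hedged sketch of the load-with-fallback flow in run_cuda_setup above: try the CUDA-specific binary first and fall back to the CPU-only library when it is missing, using the platform-dependent DYNAMIC_LIBRARY_SUFFIX. Names and error handling are simplified for illustration.

import ctypes as ct
from pathlib import Path

def load_native_library(package_dir: Path, binary_name: str, suffix: str = ".so"):
    binary_path = package_dir / binary_name
    if not binary_path.exists():
        # same fallback name run_cuda_setup builds from the suffix
        binary_path = package_dir / f"libbitsandbytes_cpu{suffix}"
    return ct.cdll.LoadLibrary(str(binary_path))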
|
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
259ad44110940259b6a6b545b8a8b2a69289bbef
|
CUDA setup cleanup (#996)
|
<1>:<del> suffix = ".so" if os.name != "nt" else ".dll"
<8>:<add>
<add> if not torch.cuda.is_available():
<add> return f'libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}', None, None, None
<del> if not torch.cuda.is_available(): return f'libbitsandbytes_cpu{suffix}', None, None, None
<11>:<del> ccs = get_compute_capabilities()
<12>:<del> ccs.sort()
<13>:<add> cc = get_compute_capabilities()[-1] # we take the highest capability
<del> cc = ccs[-1] # we take the highest capability
|
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
<0> cuda_setup = CUDASetup.get_instance()
<1> suffix = ".so" if os.name != "nt" else ".dll"
<2> if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
<3> cuda_setup.add_log_entry('')
<4> cuda_setup.add_log_entry('='*35 + 'BUG REPORT' + '='*35)
<5> cuda_setup.add_log_entry(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
<6> ('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
<7> cuda_setup.add_log_entry('='*80)
<8> if not torch.cuda.is_available(): return f'libbitsandbytes_cpu{suffix}', None, None, None
<9>
<10> cudart_path = determine_cuda_runtime_lib_path()
<11> ccs = get_compute_capabilities()
<12> ccs.sort()
<13> cc = ccs[-1] # we take the highest capability
<14> cuda_version_string = get_cuda_version()
<15>
<16> cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found: CUDA_VERSION={cuda_version_string}, Highest Compute Capability: {cc}.")
<17> cuda_setup.add_log_entry(
<18> "CUDA SETUP: To manually override the PyTorch CUDA version please see:"
<19> "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md"
<20> )
<21>
<22>
<23> # 7.5 is the minimum CC vor cublaslt
<24> has_cublaslt = is_cublasLt_compatible(cc)
<25>
<26> # TODO:
<27> </s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
def evaluate_cuda_setup():
# offset: 1
# (2) Multiple CUDA versions installed
# we use ls -l instead of nvcc to determine the cuda version
# since most installations will have the libcudart.so installed, but not the compiler
if has_cublaslt:
binary_name = f"libbitsandbytes_cuda{cuda_version_string}"
else:
"if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt"
binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt"
binary_name = f"{binary_name}{suffix}"
return binary_name, cudart_path, cc, cuda_version_string
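The selection above hinges on two inputs not shown in this chunk: the CUDA version string reported by PyTorch and the highest compute capability among visible GPUs (cuBLASLt kernels need CC >= 7.5). A hedged sketch of how those values can be obtained with plain torch calls; the helper names here are illustrative, not the library's:

import torch

def torch_cuda_version_string() -> str:
    # e.g. "11.8" -> "118"; torch.version.cuda is None on CPU-only builds
    return "".join(torch.version.cuda.split("."))

def highest_compute_capability() -> float:
    caps = [torch.cuda.get_device_capability(i) for i in range(torch.cuda.device_count())]
    return max(major + minor / 10 for major, minor in caps)

if torch.cuda.is_available():
    # below CC 7.5 the _nocublaslt binary has to be used
    has_cublaslt = highest_compute_capability() >= 7.5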
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def manual_override(self):
+ if not torch.cuda.is_available():
- if torch.cuda.is_available():
- if 'BNB_CUDA_VERSION' in os.environ:
- if len(os.environ['BNB_CUDA_VERSION']) > 0:
- warn(
- f'\n\n{"=" * 80}\n'
- 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
- 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
- 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
- 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
- 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
- f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
- f'\n{"=" * 80}\n\n'
- )
- binary_name = self.binary_name.rsplit(".", 1)[0]
- suffix = ".so" if os.name != "nt" else ".dll"
- self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
+ return
+ override_value = os.environ.get('BNB_CUDA_VERSION')
+ if not override_value:
+ return
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
- # these are the most common libs names
- # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
- # we have libcudart.so.11.0 which causes a lot of errors before
- # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
- system = platform.system()
- if system == 'Windows':
+ if platform.system() == 'Windows': # Windows
CUDA_RUNTIME_LIBS = ["nvcuda.dll"]
+ DYNAMIC_LIBRARY_SUFFIX = ".dll"
+ else: # Linux or other
- else: # Linux or other
+ # these are the most common libs names
+ # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
+ # we have libcudart.so.11.0 which causes a lot of errors before
+ # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ CUDA_RUNTIME_LIBS = ["libcudart.so", "libcudart.so.11.0", "libcudart.so.12.0", "libcudart.so.12.1", "libcudart.so.12.2"]
- CUDA_RUNTIME_LIBS = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
+ DYNAMIC_LIBRARY_SUFFIX = ".so"
- # this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
- backup_paths = []
- backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
-
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
self.cuda_available = torch.cuda.is_available()
self.cc = cc
self.cuda_version_string = cuda_version_string
self.binary_name = binary_name
self.manual_override()
package_dir = Path(__file__).parent.parent
binary_path = package_dir / self.binary_name
- suffix = ".so" if os.name != "nt" else ".dll"
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
+ legacy_binary_name = f"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}"
- legacy_binary_name = f"libbitsandbytes_cpu{suffix}"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
'"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
self.add_log_entry('2. CUDA driver not installed')
</s>
|
bitsandbytes.__main__/generate_bug_report_information
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
259ad44110940259b6a6b545b8a8b2a69289bbef
|
CUDA setup cleanup (#996)
|
<5>:<del> if 'CONDA_PREFIX' in os.environ:
<6>:<del> paths = find_file_recursive(os.environ['CONDA_PREFIX'], '*cuda*')
<7>:<del> print_header("ANACONDA CUDA PATHS")
<8>:<del> print(paths)
<9>:<del> print('')
<10>:<del> if isdir('/usr/local/'):
<11>:<del> paths = find_file_recursive('/usr/local', '*cuda*')
<12>:<del> print_header("/usr/local CUDA PATHS")
<13>:<del> print(paths)
<14>:<del> print('')
<15>:<del> if 'CUDA_PATH' in os.environ and isdir(os.environ['CUDA_PATH']):
<16>:<del> paths = find_file_recursive(os.environ['CUDA_PATH'], '*cuda*')
<17>:<del> print_header("CUDA PATHS")
<18>:<del> print(paths)
<19>:<del> print('')
<20>:<add> path_sources = [
<add> ("ANACONDA CUDA PATHS", os.environ.get("CONDA_PREFIX")),
<add> ("/usr/local CUDA PATHS", "/usr/local"),
<add> ("CUDA PATHS", os.environ.get("CUDA_PATH")),
<add> ("WORKING DIRECTORY CUDA PATHS", os.getcwd()),
<add> ]
<add> try:
<add> ld_library_path = os.environ.get("LD_LIBRARY_PATH")
<add> if ld_library_path:
<add> for path in set(ld_library_path.strip().split(os.pathsep)):
<add> path_sources.append((f"LD_LIBRARY_PATH {path} CUDA PATHS", path))
<add> except Exception as e:
<add> print(f"Could not parse LD_LIBRARY_PATH: {e}")
<21>:<add> for name, path in
|
# module: bitsandbytes.__main__
def generate_bug_report_information():
<0> print_header("")
<1> print_header("BUG REPORT INFORMATION")
<2> print_header("")
<3> print('')
<4>
<5> if 'CONDA_PREFIX' in os.environ:
<6> paths = find_file_recursive(os.environ['CONDA_PREFIX'], '*cuda*')
<7> print_header("ANACONDA CUDA PATHS")
<8> print(paths)
<9> print('')
<10> if isdir('/usr/local/'):
<11> paths = find_file_recursive('/usr/local', '*cuda*')
<12> print_header("/usr/local CUDA PATHS")
<13> print(paths)
<14> print('')
<15> if 'CUDA_PATH' in os.environ and isdir(os.environ['CUDA_PATH']):
<16> paths = find_file_recursive(os.environ['CUDA_PATH'], '*cuda*')
<17> print_header("CUDA PATHS")
<18> print(paths)
<19> print('')
<20>
<21> if isdir(os.getcwd()):
<22> paths = find_file_recursive(os.getcwd(), '*cuda*')
<23> print_header("WORKING DIRECTORY CUDA PATHS")
<24> print(paths)
<25> print('')
<26>
<27> print_header("LD_LIBRARY CUDA PATHS")
<28> if 'LD_LIBRARY_PATH' in os.environ:
<29> lib_path = os.environ['LD_LIBRARY_PATH'].strip()
<30> for path in set(lib_path.split(os.pathsep)):
<31> try:
<32> if isdir(path):
<33> print_header(f"{path} CUDA PATHS")
<34> paths = find_file_recursive(path, '*cuda*')
<35> print(paths)
<36> except Exception as e:
<37> print(f'Could not read LD_LIBRARY_PATH: {path} ({e})')
<38> print('')
<39>
|
===========unchanged ref 0===========
at: _collections_abc.Mapping
__slots__ = ()
__abc_tpflags__ = 1 << 6 # Py_TPFLAGS_MAPPING
get(key, default=None)
__reversed__ = None
at: bitsandbytes
PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
at: bitsandbytes.__main__
HEADER_WIDTH = 60
find_dynamic_library(folder, filename)
generate_bug_report_information()
at: bitsandbytes.__main__.generate_bug_report_information
path_sources = [
("ANACONDA CUDA PATHS", os.environ.get("CONDA_PREFIX")),
("/usr/local CUDA PATHS", "/usr/local"),
("CUDA PATHS", os.environ.get("CUDA_PATH")),
("WORKING DIRECTORY CUDA PATHS", os.getcwd()),
]
at: bitsandbytes.cextension
COMPILED_WITH_CUDA = True
COMPILED_WITH_CUDA = False
at: bitsandbytes.cuda_setup.main
get_compute_capabilities()
at: os
getcwd() -> str
environ = _createenviron()
at: os.path
isdir(s: AnyPath) -> bool
===========changed ref 0===========
# module: bitsandbytes.__main__
+ def find_dynamic_library(folder, filename):
+ for ext in ("so", "dll", "dylib"):
+ yield from glob.glob(os.path.join(folder, "**", filename + ext))
+
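A standalone usage sketch of such a search helper. Note that glob only descends into subdirectories for a "**" pattern when recursive=True is passed, which this sketch adds explicitly:

import glob
import os

def find_dynamic_library(folder, filename):
    # yields every matching shared library under `folder` (illustrative re-implementation)
    for ext in ("so", "dll", "dylib"):
        yield from glob.glob(os.path.join(folder, "**", filename + ext), recursive=True)

for hit in find_dynamic_library("/usr/local", "*cuda*"):
    print(hit)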
===========changed ref 1===========
# module: bitsandbytes.__main__
- def find_file_recursive(folder, filename):
- import glob
- outs = []
- try:
- for ext in ["so", "dll", "dylib"]:
- out = glob.glob(os.path.join(folder, "**", filename + ext))
- outs.extend(out)
- except Exception as e:
- raise RuntimeError('Error: Something when wrong when trying to find file.') from e
-
- return outs
-
===========changed ref 2===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def manual_override(self):
+ if not torch.cuda.is_available():
- if torch.cuda.is_available():
- if 'BNB_CUDA_VERSION' in os.environ:
- if len(os.environ['BNB_CUDA_VERSION']) > 0:
- warn(
- f'\n\n{"=" * 80}\n'
- 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
- 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
- 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
- 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
- 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
- f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
- f'\n{"=" * 80}\n\n'
- )
- binary_name = self.binary_name.rsplit(".", 1)[0]
- suffix = ".so" if os.name != "nt" else ".dll"
- self.binary_name = binary_name[:-3] + f'{os.environ["BNB_CUDA_VERSION"]}.{suffix}'
+ return
+ override_value = os.environ.get('BNB_CUDA_VERSION')
+ if not override_value:
+ return
===========changed ref 3===========
# module: bitsandbytes.cuda_setup.main
- # these are the most common libs names
- # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
- # we have libcudart.so.11.0 which causes a lot of errors before
- # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
- system = platform.system()
- if system == 'Windows':
+ if platform.system() == 'Windows': # Windows
CUDA_RUNTIME_LIBS = ["nvcuda.dll"]
+ DYNAMIC_LIBRARY_SUFFIX = ".dll"
+ else: # Linux or other
- else: # Linux or other
+ # these are the most common libs names
+ # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
+ # we have libcudart.so.11.0 which causes a lot of errors before
+ # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+ CUDA_RUNTIME_LIBS = ["libcudart.so", "libcudart.so.11.0", "libcudart.so.12.0", "libcudart.so.12.1", "libcudart.so.12.2"]
- CUDA_RUNTIME_LIBS = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1', 'libcudart.so.12.2']
+ DYNAMIC_LIBRARY_SUFFIX = ".so"
- # this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
- backup_paths = []
- backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
-
===========changed ref 4===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
self.cuda_available = torch.cuda.is_available()
self.cc = cc
self.cuda_version_string = cuda_version_string
self.binary_name = binary_name
self.manual_override()
package_dir = Path(__file__).parent.parent
binary_path = package_dir / self.binary_name
- suffix = ".so" if os.name != "nt" else ".dll"
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
+ legacy_binary_name = f"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}"
- legacy_binary_name = f"libbitsandbytes_cpu{suffix}"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
'"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
self.add_log_entry('2. CUDA driver not installed')
</s>
|
bitsandbytes.cuda_setup.main/warn_in_case_of_duplicates
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
8c507d92c0950305d376b19137b5d8cccccea457
|
Enable crate-ci/typos lint; fix typos (#1005)
|
<4>:<add> "but this might mismatch with the CUDA version that is needed for bitsandbytes."
<del> "but this might missmatch with the CUDA version that is needed for bitsandbytes."
|
# module: bitsandbytes.cuda_setup.main
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
<0> if len(results_paths) > 1:
<1> warning_msg = (
<2> f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}.. "
<3> "We select the PyTorch default libcudart.so, which is {torch.version.cuda},"
<4> "but this might missmatch with the CUDA version that is needed for bitsandbytes."
<5> "To override this behavior set the BNB_CUDA_VERSION=<version string, e.g. 122> environmental variable"
<6> "For example, if you want to use the CUDA version 122"
<7> "BNB_CUDA_VERSION=122 python ..."
<8> "OR set the environmental variable in your .bashrc: export BNB_CUDA_VERSION=122"
<9> "In the case of a manual override, make sure you set the LD_LIBRARY_PATH, e.g."
<10> "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2")
<11> CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
<12>
|
===========unchanged ref 0===========
at: bitsandbytes.cuda_setup.main
CUDA_RUNTIME_LIBS = ["libcudart.so", "libcudart.so.11.0", "libcudart.so.12.0", "libcudart.so.12.1", "libcudart.so.12.2"]
CUDA_RUNTIME_LIBS = ["nvcuda.dll"]
CUDASetup()
at: bitsandbytes.cuda_setup.main.CUDASetup
_instance = None
get_instance()
at: pathlib
Path()
at: typing
Set = _alias(set, 1, inst=False, name='Set')
|
tests.test_functional/assert_all_approx_close
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
d11b5068dd74de6694cea0cce350bc86eb2ba5b2
|
tests: fix all_close to respect max 2 positional args (#1074)
|
<0>:<add> idx = torch.isclose(a, b, rtol=rtol, atol=atol)
<del> idx = torch.isclose(a, b, rtol, atol)
<5>:<add> torch.testing.assert_close(a, b, rtol=rtol, atol=atol)
<del> torch.testing.assert_close(a, b, rtol, atol)
|
# module: tests.test_functional
def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
<0> idx = torch.isclose(a, b, rtol, atol)
<1> sumval = (idx == 0).sum().item()
<2> if sumval > count:
<3> if throw:
<4> print(f"Too many values not close: assert {sumval} < {count}")
<5> torch.testing.assert_close(a, b, rtol, atol)
<6>
<7> return sumval
<8>
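The fix in this record boils down to passing the tolerances by keyword: torch.testing.assert_close accepts at most two positional arguments (the actual and expected tensors), so rtol/atol must be named, and naming them in torch.isclose as well keeps both calls consistent. A small self-contained illustration:

import torch

a = torch.tensor([1.0, 1.001])
b = torch.tensor([1.0, 1.0])

# tight tolerances: one element falls outside
mismatched = (~torch.isclose(a, b, rtol=1e-4, atol=1e-4)).sum().item()
print(f"{mismatched} element(s) outside tolerance")

# looser tolerances: the assertion passes
torch.testing.assert_close(a, b, rtol=1e-2, atol=1e-2)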
| |
tests.test_modules/assert_all_approx_close
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
d11b5068dd74de6694cea0cce350bc86eb2ba5b2
|
tests: fix all_close to respect max 2 positional args (#1074)
|
<0>:<add> idx = torch.isclose(a, b, rtol=rtol, atol=atol)
<del> idx = torch.isclose(a, b, rtol, atol)
<4>:<add> torch.testing.assert_close(a, b, rtol=rtol, atol=atol)
<del> torch.testing.assert_close(a, b, rtol, atol)
|
# module: tests.test_modules
def assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):
<0> idx = torch.isclose(a, b, rtol, atol)
<1> sumval = (idx == 0).sum().item()
<2> if sumval > count:
<3> print(f"Too many values not close: assert {sumval} < {count}")
<4> torch.testing.assert_close(a, b, rtol, atol)
<5>
|
===========changed ref 0===========
# module: tests.test_functional
def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
+ idx = torch.isclose(a, b, rtol=rtol, atol=atol)
- idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
if throw:
print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol=rtol, atol=atol)
- torch.testing.assert_close(a, b, rtol, atol)
return sumval
|
tests.test_optim/test_optimizer32bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
d11b5068dd74de6694cea0cce350bc86eb2ba5b2
|
tests: fix all_close to respect max 2 positional args (#1074)
|
<s>formatter("opt"))
@pytest.mark.parametrize("gtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
@pytest.mark.parametrize("dim2", [32, 1024, 4097, 1], ids=id_formatter("dim2"))
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
<0> if gtype == torch.bfloat16 and optim_name in ['momentum', 'rmsprop']:
<1> pytest.skip()
<2> if dim1 == 1 and dim2 == 1:
<3> return
<4> p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
<5> p2 = p1.clone()
<6> p1 = p1.float()
<7>
<8> torch_optimizer = str2optimizers[optim_name][0]([p1])
<9> bnb_optimizer = str2optimizers[optim_name][1]([p2])
<10>
<11> if gtype == torch.float32:
<12> atol, rtol = 1e-6, 1e-5
<13> elif gtype == torch.bfloat16:
<14> atol, rtol = 1e-3, 1e-2
<15> else:
<16> atol, rtol = 1e-4, 1e-3
<17>
<18> for i in range(k):
<19> g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
<20> p1.grad = g.clone().float()
<21> p2.grad = g.clone()
<22>
<23> bnb_optimizer.step()
<24> torch_optimizer.step()
<25>
<26> for name1, name2 in str2statenames[optim_name]:
<27> torch.testing.assert_close(
<28> torch_optimizer.state[p1][name1],
<29> bnb_optimizer.state[p2</s>
|
===========below chunk 0===========
<s>
@pytest.mark.parametrize("gtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
@pytest.mark.parametrize("dim2", [32, 1024, 4097, 1], ids=id_formatter("dim2"))
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
# offset: 1
atol=atol,
rtol=rtol,
)
# since Lion can have pretty noisy updates where things lie at the boundary
# allow up to 10 errors for Lion
assert_most_approx_close(p1, p2.float(), atol, rtol, max_error_count=10)
if i % (k // 5) == 0 and i > 0:
path = get_temp_dir()
torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
del bnb_optimizer
bnb_optimizer = None
bnb_optimizer = str2optimizers[optim_name][1]([p2])
bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
rm_path(path)
# since Lion can have pretty noisy updates where things lie at the boundary
# allow up to 10 errors for Lion
assert_most_approx_close(p1, p2.float(), atol, rtol, max_error_count=10)
for name1, name2 in str2statenames[optim_name]:
# since Lion can have pretty noisy updates where things lie at the boundary
# allow up to 10 errors for Lion
assert_most_approx_close(torch_optimizer.state[p1][name1], bnb_optimizer.state[p2][name2],
atol=atol, rtol=rtol,
max_error_count=10)
if gtype !=</s>
===========below chunk 1===========
<s>
@pytest.mark.parametrize("gtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
@pytest.mark.parametrize("dim2", [32, 1024, 4097, 1], ids=id_formatter("dim2"))
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
# offset: 2
<s>
atol=atol, rtol=rtol,
max_error_count=10)
if gtype != torch.float32:
# the adam buffers should also be close because they are 32-bit
# but the parameters can diverge because they are 16-bit
# the difference grow larger and larger with each update
# --> copy the state to keep weights close
p1.data = p1.data.to(p2.dtype).float()
p2.copy_(p1.data)
torch.testing.assert_close(p1.to(p2.dtype), p2)
if optim_name in ["lars", "lamb"]:
assert bnb_optimizer.state[p2]["unorm_vec"] > 0.0
===========changed ref 0===========
# module: tests.test_modules
def assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):
+ idx = torch.isclose(a, b, rtol=rtol, atol=atol)
- idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol=rtol, atol=atol)
- torch.testing.assert_close(a, b, rtol, atol)
===========changed ref 1===========
# module: tests.test_functional
def assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):
+ idx = torch.isclose(a, b, rtol=rtol, atol=atol)
- idx = torch.isclose(a, b, rtol, atol)
sumval = (idx == 0).sum().item()
if sumval > count:
if throw:
print(f"Too many values not close: assert {sumval} < {count}")
+ torch.testing.assert_close(a, b, rtol=rtol, atol=atol)
- torch.testing.assert_close(a, b, rtol, atol)
return sumval
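The test above periodically checkpoints the bitsandbytes optimizer with torch.save and reloads it via load_state_dict before continuing. A minimal sketch of that round trip, using torch.optim.Adam so it runs without a GPU or bitsandbytes:

import io
import torch

p = torch.nn.Parameter(torch.randn(4, 4))
opt = torch.optim.Adam([p])
p.grad = torch.randn_like(p)
opt.step()

buf = io.BytesIO()                      # in-memory stand-in for the temp file used in the test
torch.save(opt.state_dict(), buf)
buf.seek(0)

opt2 = torch.optim.Adam([p])
opt2.load_state_dict(torch.load(buf))
assert torch.equal(opt.state[p]["exp_avg"], opt2.state[p]["exp_avg"])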
|
|
bitsandbytes.cuda_setup.main/CUDASetup.run_cuda_setup
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
e820409c095ea7cbb5ce156992307b84352cbf90
|
(docs) Clarify Python and CUDA Toolkit version requirement (#1076)
|
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
<0> self.initialized = True
<1> self.cuda_setup_log = []
<2>
<3> binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
<4> self.cudart_path = cudart_path
<5> self.cuda_available = torch.cuda.is_available()
<6> self.cc = cc
<7> self.cuda_version_string = cuda_version_string
<8> self.binary_name = binary_name
<9> self.manual_override()
<10>
<11> package_dir = Path(__file__).parent.parent
<12> binary_path = package_dir / self.binary_name
<13>
<14> try:
<15> if not binary_path.exists():
<16> self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
<17> legacy_binary_name = f"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}"
<18> self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
<19> binary_path = package_dir / legacy_binary_name
<20> if not binary_path.exists() or torch.cuda.is_available():
<21> self.add_log_entry('')
<22> self.add_log_entry('='*48 + 'ERROR' + '='*37)
<23> self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
<24> self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
<25> '"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
<26> self.add_log_entry('2. CUDA driver not installed')
<27> self.add_log_entry('3. CUDA not</s>
|
===========below chunk 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
self.add_log_entry('4. You have multiple conflicting CUDA libraries')
self.add_log_entry('5. Required library not pre-compiled for this bitsandbytes release!')
self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(str(binary_path))
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path!s}...")
self.lib = ct.cdll.LoadLibrary(str(binary_path))
except Exception as ex:
self.add_log_entry(str(ex))
|
|
bitsandbytes.cuda_setup.main/warn_in_case_of_duplicates
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
e820409c095ea7cbb5ce156992307b84352cbf90
|
(docs) Clarify Python and CUDA Toolkit version requirement (#1076)
|
<10>:<add> "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-12.2")
<del> "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2")
|
# module: bitsandbytes.cuda_setup.main
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
<0> if len(results_paths) > 1:
<1> warning_msg = (
<2> f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}.. "
<3> "We select the PyTorch default libcudart.so, which is {torch.version.cuda},"
<4> "but this might mismatch with the CUDA version that is needed for bitsandbytes."
<5> "To override this behavior set the BNB_CUDA_VERSION=<version string, e.g. 122> environmental variable"
<6> "For example, if you want to use the CUDA version 122"
<7> "BNB_CUDA_VERSION=122 python ..."
<8> "OR set the environmental variable in your .bashrc: export BNB_CUDA_VERSION=122"
<9> "In the case of a manual override, make sure you set the LD_LIBRARY_PATH, e.g."
<10> "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2")
<11> CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
<12>
|
===========changed ref 0===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
self.initialized = True
self.cuda_setup_log = []
binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
self.cudart_path = cudart_path
self.cuda_available = torch.cuda.is_available()
self.cc = cc
self.cuda_version_string = cuda_version_string
self.binary_name = binary_name
self.manual_override()
package_dir = Path(__file__).parent.parent
binary_path = package_dir / self.binary_name
try:
if not binary_path.exists():
self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
legacy_binary_name = f"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}"
self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
binary_path = package_dir / legacy_binary_name
if not binary_path.exists() or torch.cuda.is_available():
self.add_log_entry('')
self.add_log_entry('='*48 + 'ERROR' + '='*37)
self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
'"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
self.add_log_entry('2. CUDA driver not installed')
self.add_log_entry('3. CUDA not installed')
self.add_log_entry('4. You have multiple conflicting CUDA libraries')
self.add_</s>
===========changed ref 1===========
# module: bitsandbytes.cuda_setup.main
class CUDASetup:
def run_cuda_setup(self):
# offset: 1
<s>DA not installed')
self.add_log_entry('4. You have multiple conflicting CUDA libraries')
self.add_log_entry('5. Required library not pre-compiled for this bitsandbytes release!')
+ self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=118`.')
- self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
self.add_log_entry('='*80)
self.add_log_entry('')
self.generate_instructions()
raise Exception('CUDA SETUP: Setup Failed!')
self.lib = ct.cdll.LoadLibrary(str(binary_path))
else:
self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path!s}...")
self.lib = ct.cdll.LoadLibrary(str(binary_path))
except Exception as ex:
self.add_log_entry(str(ex))
|
bitsandbytes.nn.modules/Int8Params.__new__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
a1c0844bdbcabbd4354d73b9c5c9af8077d3c08b
|
adding whole Linear8bitLt/Linear4bit module save/load serialization (#1099)
|
<5>:<add> obj = torch.Tensor._make_subclass(cls, data, requires_grad)
<del> return torch.Tensor._make_subclass(cls, data, requires_grad)
<6>:<add> obj.CB, obj.SCB = cls.CB, cls.SCB
<add> return obj
|
# module: bitsandbytes.nn.modules
class Int8Params(torch.nn.Parameter):
def __new__(
cls,
data=None,
requires_grad=True,
has_fp16_weights=False,
CB=None,
SCB=None,
):
<0> cls.has_fp16_weights = has_fp16_weights
<1> cls.CB = None
<2> cls.SCB = None
<3> if data is None:
<4> data = torch.empty(0)
<5> return torch.Tensor._make_subclass(cls, data, requires_grad)
<6>
|
===========unchanged ref 0===========
at: bitsandbytes.nn.modules.Int8Params.cuda
self.CB = CB
self.SCB = SCB
at: torch._C._VariableFunctions
empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
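The change above attaches CB and SCB to the freshly created instance instead of leaving them as shared class attributes, so each Int8Params object carries its own quantization buffers. A minimal sketch of the underlying pattern, with a hypothetical TaggedParam standing in for Int8Params:

import torch

class TaggedParam(torch.nn.Parameter):
    def __new__(cls, data=None, requires_grad=True, tag=None):
        if data is None:
            data = torch.empty(0)
        obj = torch.Tensor._make_subclass(cls, data, requires_grad)
        obj.tag = tag  # instance attribute: each parameter carries its own value
        return obj

p = TaggedParam(torch.zeros(2, 2), requires_grad=False, tag="quantized")
print(p.tag)  # "quantized"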
|
tests.test_linear8bitlt/test_linear_serialization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
a1c0844bdbcabbd4354d73b9c5c9af8077d3c08b
|
adding whole Linear8bitLt/Linear4bit module save/load serialization (#1099)
|
<22>:<add> if save_before_forward:
<add> bytes_8bit = torch_save_to_buffer(linear_custom)
<add>
<29>:<add>
<add> if not save_before_forward:
<add> bytes_8bit = torch_save_to_buffer(linear_custom)
|
<s>@pytest.mark.parametrize("load_before_cuda", TRUE_FALSE, ids=id_formatter("load_before_cuda"))
+ def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda, force_no_igemmlt, save_before_forward, load_before_cuda):
- def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda, force_no_igemmlt):
<0> linear = torch.nn.Linear(32, 96)
<1> x = torch.randn(3, 32, dtype=torch.half)
<2>
<3> linear_custom = Linear8bitLt(
<4> linear.in_features,
<5> linear.out_features,
<6> linear.bias is not None,
<7> has_fp16_weights=has_fp16_weights,
<8> threshold=6.0,
<9> )
<10> if force_no_igemmlt:
<11> linear_custom.state.force_no_igemmlt = True
<12>
<13> linear_custom.weight = bnb.nn.Int8Params(
<14> linear.weight.data.clone(), requires_grad=has_fp16_weights, has_fp16_weights=has_fp16_weights
<15> )
<16> linear_custom.bias = linear.bias
<17> linear_custom = linear_custom.cuda()
<18>
<19> if serialize_before_forward:
<20> state_dict_8bit = linear_custom.state_dict()
<21>
<22> x_first = x.clone().cuda().requires_grad_(True)
<23> fx_first = linear_custom(x_first).float()
<24> grad_proj = torch.randn_like(fx_first)
<25> (fx_first * grad_proj).mean().backward()
<26>
<27> if not serialize_before_forward:
<28> state_dict_8bit = linear_custom.state_dict()
<29>
<30> with TemporaryDirectory() as tmpdir:
<31> state_path_8bit = os.</s>
|
===========below chunk 0===========
<s>.parametrize("load_before_cuda", TRUE_FALSE, ids=id_formatter("load_before_cuda"))
+ def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda, force_no_igemmlt, save_before_forward, load_before_cuda):
- def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda, force_no_igemmlt):
# offset: 1
state_path = os.path.join(tmpdir, "state.pth")
torch.save(linear.state_dict(), state_path)
torch.save(state_dict_8bit, state_path_8bit)
if not has_fp16_weights:
assert os.path.getsize(state_path_8bit) < 0.5 * os.path.getsize(state_path)
new_state_dict = torch.load(state_path_8bit)
new_linear_custom = Linear8bitLt(
linear.in_features,
linear.out_features,
linear.bias is not None,
has_fp16_weights=has_fp16_weights,
threshold=6.0,
)
if force_no_igemmlt:
new_linear_custom.state.force_no_igemmlt = True
if deserialize_before_cuda:
with nullcontext() if has_fp16_weights else pytest.raises(RuntimeError):
new_linear_custom.load_state_dict(new_state_dict, strict=True)
new_linear_custom = new_linear_custom.cuda()
if not deserialize_before_cuda:
new_linear_custom.load_state_dict(new_state_dict, strict=True)
x_second = x.clone().cuda().requires_grad_(True)
fx_second = new_linear_custom(x_second).float()
(fx_second * grad_proj).mean().</s>
===========below chunk 1===========
<s>.parametrize("load_before_cuda", TRUE_FALSE, ids=id_formatter("load_before_cuda"))
+ def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda, force_no_igemmlt, save_before_forward, load_before_cuda):
- def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda, force_no_igemmlt):
# offset: 2
<s> fx_second = new_linear_custom(x_second).float()
(fx_second * grad_proj).mean().backward()
# if 8-bit weights were loaded before .cuda, state is incorrect anyway and RuntimeError was raised
if has_fp16_weights or not deserialize_before_cuda:
assert torch.allclose(fx_first, fx_second, atol=1e-5)
assert torch.allclose(x_first.grad, x_second.grad, atol=1e-5)
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.autograd._functions.MatmulLtState
_tile_indices: Optional[torch.Tensor] = None
force_no_igemmlt: bool = False
CB = None
CxB = None
SB = None
SCB = None
CxBt = None
SBt = None
CBt = None
subB = None
outlier_pool = None
has_accumulated_gradients = False
threshold = 0.0
idx = None
is_training = True
has_fp16_weights = True
memory_efficient_backward = False
use_pool = False
formatB = F.get_special_format_str()
at: bitsandbytes.nn.modules
Int8Params(data: Tensor=..., requires_grad: builtins.bool=...)
Linear8bitLt(input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None, device=None)
at: bitsandbytes.nn.modules.Linear8bitLt.__init__
self.state = bnb.MatmulLtState()
self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
at: io
BytesIO(initial_bytes: bytes=...)
at: io.BytesIO
seek(self, offset: int, whence: int=..., /) -> int
===========unchanged ref 1===========
at: os.path
join(a: StrPath, *paths: StrPath) -> str
join(a: BytesPath, *paths: BytesPath) -> bytes
getsize(filename: AnyPath) -> int
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: tests.helpers
id_formatter(label: str)
TRUE_FALSE = (True, False)
at: torch._C
half: dtype = ...
===========unchanged ref 2===========
at: torch._C._VariableFunctions
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional</s>
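The torch_save_to_buffer helper referenced above serializes the whole module through an in-memory buffer rather than a file. A rough sketch of that round trip with a plain nn.Linear (the bitsandbytes test additionally checks that the quantization state survives):

import io
import torch

model = torch.nn.Linear(32, 96)
buf = io.BytesIO()
torch.save(model, buf)          # pickle the module object itself, not just its state_dict
buf.seek(0)
restored = torch.load(buf, weights_only=False)  # full-module pickles need weights_only=False on recent torch

x = torch.randn(3, 32)
assert torch.allclose(model(x), restored(x))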
|
tests.test_linear4bit/test_linear_serialization
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
a1c0844bdbcabbd4354d73b9c5c9af8077d3c08b
|
adding whole Linear8bitLt/Linear4bit module save/load serialization (#1099)
|
<s>bias", TRUE_FALSE)
@pytest.mark.parametrize("compress_statistics", TRUE_FALSE)
@pytest.mark.parametrize("quant_type", ["nf4", "fp4"])
+ @pytest.mark.parametrize("save_before_forward", TRUE_FALSE)
+ def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage, save_before_forward):
- def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage):
<0> original_dtype = torch.float16
<1> compute_dtype = None
<2> device = "cuda"
<3> layer_shape = (300, 400)
<4>
<5> linear = torch.nn.Linear(
<6> *layer_shape, dtype=original_dtype, device="cpu"
<7> ) # original layer
<8>
<9> # Quantizing original layer
<10> linear_q = bnb.nn.Linear4bit(
<11> linear.in_features,
<12> linear.out_features,
<13> bias=bias,
<14> compute_dtype=compute_dtype,
<15> compress_statistics=compress_statistics,
<16> quant_type=quant_type,
<17> device="meta",
<18> )
<19> new_weight = bnb.nn.Params4bit(
<20> data=linear.weight, quant_type=quant_type, requires_grad=False
<21> )
<22> linear_q.weight = new_weight
<23> if bias:
<24> linear_q.bias = torch.nn.Parameter(linear.bias)
<25> linear_q = linear_q.to(device)
<26>
<27> # saving to state_dict:
<28> sd = linear_q.state_dict()
<29>
<30> # restoring from state_dict:
<31> bias_data2 = sd.pop("bias", None)
<32> weight_data2 = sd.pop("weight")
<33> weight2 = bnb.nn.Params4bit.from_prequantized(quantized_stats=sd, data=weight_data2)
<34>
<35> # creating new layer with same params</s>
|
===========below chunk 0===========
<s>)
@pytest.mark.parametrize("compress_statistics", TRUE_FALSE)
@pytest.mark.parametrize("quant_type", ["nf4", "fp4"])
+ @pytest.mark.parametrize("save_before_forward", TRUE_FALSE)
+ def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage, save_before_forward):
- def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage):
# offset: 1
linear_q2 = bnb.nn.Linear4bit(
linear.in_features,
linear.out_features,
bias=bias,
compute_dtype=compute_dtype,
compress_statistics=compress_statistics,
quant_type=quant_type,
device="meta",
)
# loading weights from state_dict:
linear_q2.weight = weight2
if bias:
linear_q2.bias = torch.nn.Parameter(bias_data2)
linear_q2 = linear_q2.to(device)
# MATCHING
a, b = linear_q.weight, linear_q2.weight
# Quantizing original layer with specified quant_storage type
linear_qs = bnb.nn.Linear4bit(
linear.in_features,
linear.out_features,
bias=bias,
compute_dtype=compute_dtype,
compress_statistics=compress_statistics,
quant_type=quant_type,
quant_storage=storage[quant_storage],
device="meta",
)
linear_qs.weight = bnb.nn.Params4bit(
data=linear.weight,
requires_grad=False,
quant_type=quant_type,
quant_storage=storage[quant_storage],
)
if bias:
linear_qs.bias = torch.nn.Parameter(linear.bias)
linear_qs = linear_qs.to(device)
assert a.device == b.device
assert a.dtype == b.dtype
</s>
===========below chunk 1===========
<s>)
@pytest.mark.parametrize("compress_statistics", TRUE_FALSE)
@pytest.mark.parametrize("quant_type", ["nf4", "fp4"])
+ @pytest.mark.parametrize("save_before_forward", TRUE_FALSE)
+ def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage, save_before_forward):
- def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage):
# offset: 2
<s>_qs.to(device)
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
q0 = a.quant_state
q1 = b.quant_state
for attr in ("code", "dtype", "blocksize", "absmax"):
c, d = getattr(q0, attr), getattr(q1, attr)
if isinstance(c, torch.Tensor):
assert torch.equal(c, d)
else:
assert c == d, f"{c} != {d}"
if q0.state2 is not None:
for attr in ("code", "dtype", "blocksize", "absmax"):
c, d = getattr(q0.state2, attr), getattr(q1.state2, attr)
if isinstance(c, torch.Tensor):
assert torch.equal(c, d)
else:
assert c == d, f"{c} != {d}"
if bias:
a, b = linear_q.bias, linear_q2.bias
assert a.device == b.device
assert a.dtype == b.dtype
assert torch.equal(a, b)
# Forward test
x = torch.rand(42, layer_shape[0], device=device)
a = linear_q(x)
b = linear_q2(x)
c = linear_qs</s>
===========below chunk 2===========
<s>)
@pytest.mark.parametrize("compress_statistics", TRUE_FALSE)
@pytest.mark.parametrize("quant_type", ["nf4", "fp4"])
+ @pytest.mark.parametrize("save_before_forward", TRUE_FALSE)
+ def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage, save_before_forward):
- def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage):
# offset: 3
<s>)
assert a.device == b.device
assert a.dtype == b.dtype
assert a.device == c.device
assert a.dtype == c.dtype
assert torch.equal(a, b)
assert torch.equal(a, c)
# Test moving to CPU and back to GPU
linear_q2.to("cpu")
linear_q2.to(device)
d = linear_qs(x)
assert c.dtype == d.dtype
assert c.device == d.device
assert torch.equal(c, d)
# Saved size ratio test. Target set for layer_shape == (300, 400) w/ bias
with TemporaryDirectory() as tmpdir:
state_path_4bit = os.path.join(tmpdir, "state_4bit.pth")
state_path = os.path.join(tmpdir, "state.pth")
torch.save(linear.state_dict(), state_path)
torch.save(linear_q.state_dict(), state_path_4bit)
size_orig, size_4 = (
os.path.getsize(state_path),
os.path.getsize(state_path_4bit),
)
size_ratio = size_4 / size_orig
target_compression = (
0.143 if original_dtype == torch.float32 else 0.29
) # these numbers get lower as weight shape incre</s>
===========below chunk 3===========
<s>)
@pytest.mark.parametrize("compress_statistics", TRUE_FALSE)
@pytest.mark.parametrize("quant_type", ["nf4", "fp4"])
+ @pytest.mark.parametrize("save_before_forward", TRUE_FALSE)
+ def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage, save_before_forward):
- def test_linear_serialization(quant_type, compress_statistics, bias, quant_storage):
# offset: 4
<s> ratio_error_msg = f"quantized_size {size_4:,} is larger on disk than {target_compression:.2%} of original size {size_orig:,}"
assert size_ratio < target_compression, ratio_error_msg
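The size-ratio assertion at the end compares the on-disk footprint of the quantized and original state dicts. A self-contained sketch of the same measurement with stand-in tensors (a 300x400 fp16 weight versus a packed uint8 buffer holding 4 bits per value):

import os
import tempfile
import torch

sd_orig = {"weight": torch.randn(300, 400, dtype=torch.float16)}
sd_quant = {"weight": torch.zeros(300 * 400 // 2, dtype=torch.uint8)}  # 4-bit packed stand-in

with tempfile.TemporaryDirectory() as tmpdir:
    p_orig = os.path.join(tmpdir, "state.pth")
    p_quant = os.path.join(tmpdir, "state_4bit.pth")
    torch.save(sd_orig, p_orig)
    torch.save(sd_quant, p_quant)
    ratio = os.path.getsize(p_quant) / os.path.getsize(p_orig)
    print(f"quantized/original on-disk ratio: {ratio:.2%}")  # roughly 25% before absmax overhead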
|
|
tests.test_generation/get_4bit_config
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
62249b4a7752ba35d75f0848a2e380e5d650da5c
|
Soft-require `transformers` in tests
|
<0>:<add> return transformers.BitsAndBytesConfig(
<del> return BitsAndBytesConfig(
|
# module: tests.test_generation
def get_4bit_config():
<0> return BitsAndBytesConfig(
<1> load_in_4bit=True,
<2> load_in_8bit=False,
<3> llm_int8_threshold=6.0,
<4> llm_int8_has_fp16_weight=False,
<5> bnb_4bit_compute_dtype=torch.float16,
<6> bnb_4bit_use_double_quant=True,
<7> bnb_4bit_quant_type='nf4',
<8> )
<9>
|
===========unchanged ref 0===========
at: torch._C
float16: dtype = ...
|
tests.test_generation/get_model_and_tokenizer
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
62249b4a7752ba35d75f0848a2e380e5d650da5c
|
Soft-require `transformers` in tests
|
<6>:<add> model = transformers.AutoModelForCausalLM.from_pretrained(model_name_or_path,
<del> model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
|
# module: tests.test_generation
def get_model_and_tokenizer(config):
<0> model_name_or_path, quant_type = config
<1> bnb_config = get_4bit_config()
<2> if quant_type == '16bit':
<3> bnb_config.load_in_4bit = False
<4> else:
<5> bnb_config.bnb_4bit_quant_type= quant_type
<6> model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
<7> quantization_config=bnb_config,
<8> max_memory={0:'48GB'},
<9> device_map='auto',
<10> torch_dtype=torch.bfloat16
<11> ).eval()
<12>
<13> tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path)
<14>
<15> return model, tokenizer
<16>
|
===========unchanged ref 0===========
at: tests.test_generation
transformers = pytest.importorskip("transformers")
at: tests.test_generation.get_model_and_tokenizer
model_name_or_path, quant_type = config
model_name_or_path, quant_type = config
bnb_config = get_4bit_config()
at: torch._C
bfloat16: dtype = ...
===========changed ref 0===========
# module: tests.test_generation
def get_4bit_config():
+ return transformers.BitsAndBytesConfig(
- return BitsAndBytesConfig(
load_in_4bit=True,
load_in_8bit=False,
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type='nf4',
)
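The soft requirement is implemented with pytest.importorskip at module import time: the call returns the imported package, and every test in the file is skipped when it is absent. A minimal sketch of the pattern:

import pytest
import torch

transformers = pytest.importorskip("transformers")  # skips the whole module if missing

def test_4bit_config_defaults():
    cfg = transformers.BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
    assert cfg.load_in_4bit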
|
tests.conftest/pytest_runtest_call
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
2416dd3693fc2f1aa93fe300b71bf14eb1184aa9
|
Add additional guard for "no NVIDIA driver"
|
<10>:<add> except RuntimeError as re:
<add> # CUDA-enabled Torch build, but no CUDA-capable device found
<add> if "Found no NVIDIA driver on your system" in str(re):
<add> pytest.skip("No NVIDIA driver found")
<add> raise
|
# module: tests.conftest
def pytest_runtest_call(item):
<0> try:
<1> item.runtest()
<2> except NotImplementedError as nie:
<3> if "NO_CUBLASLT" in str(nie):
<4> pytest.skip("CUBLASLT not available")
<5> raise
<6> except AssertionError as ae:
<7> if str(ae) == "Torch not compiled with CUDA enabled":
<8> pytest.skip("Torch not compiled with CUDA enabled")
<9> raise
<10>
|
===========unchanged ref 0===========
at: _pytest.outcomes
_WithException(*, allow_module_level: bool=False, msg: Optional[str]=None) -> NoReturn
|
bitsandbytes.diagnostics.main/sanity_check
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
6a5a18a1d4badcd0d888c6de752599a05c820ade
|
Sanity check: Add check for `lib` being None
|
<0>:<add> from bitsandbytes.cextension import lib
<add>
<add> if lib is None:
<add> print_dedented(
<add> """
<add> Couldn't load the bitsandbytes library, likely due to missing binaries.
<add> Please ensure bitsandbytes is properly installed.
<add>
<add> For source installations, compile the binaries with `cmake -DCOMPUTE_BACKEND=cuda -S .`.
<add> See the documentation for more details if needed.
<add>
<add> Trying a simple check anyway, but this will likely fail...
<add> """
<add> )
<add>
|
# module: bitsandbytes.diagnostics.main
def sanity_check():
<0> from bitsandbytes.optim import Adam
<1>
<2> p = torch.nn.Parameter(torch.rand(10, 10).cuda())
<3> a = torch.rand(10, 10).cuda()
<4> p1 = p.data.sum().item()
<5> adam = Adam([p])
<6> out = a * p
<7> loss = out.sum()
<8> loss.backward()
<9> adam.step()
<10> p2 = p.data.sum().item()
<11> assert p1 != p2
<12>
|
===========unchanged ref 0===========
at: bitsandbytes.cextension
lib = get_native_library()
lib = None
at: bitsandbytes.diagnostics.utils
print_dedented(text)
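The sanity check verifies that one optimizer step actually changes the parameter. The same idea, run on CPU with torch.optim.Adam, works even where the bitsandbytes binary or a GPU is unavailable:

import torch

p = torch.nn.Parameter(torch.rand(10, 10))
a = torch.rand(10, 10)
before = p.data.sum().item()
adam = torch.optim.Adam([p])
(a * p).sum().backward()
adam.step()
assert before != p.data.sum().item()  # the step must have moved the parameter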
|
bitsandbytes.diagnostics.cuda/find_cuda_libraries_in_path_list
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
79d1cccc7fb90eaa3d65aebabe99050e343306b3
|
Improve filtering for values that are surely not paths
|
<3>:<add> if os.sep not in dir_string:
<add> continue
<5>:<add> try:
<add> if not dir.exists():
<del> if not dir.exists():
<6>:<add> logger.warning(f"The directory listed in your path is found to be non-existent: {dir}")
<del> logger.warning(f"The directory listed in your path is found to be non-existent: {dir}")
<7>:<add> continue
<del> continue
<8>:<add> except OSError: # Assume an esoteric error trying to poke at the directory
<add> pass
|
# module: bitsandbytes.diagnostics.cuda
def find_cuda_libraries_in_path_list(paths_list_candidate: str) -> Iterable[Path]:
<0> for dir_string in paths_list_candidate.split(os.pathsep):
<1> if not dir_string:
<2> continue
<3> try:
<4> dir = Path(dir_string)
<5> if not dir.exists():
<6> logger.warning(f"The directory listed in your path is found to be non-existent: {dir}")
<7> continue
<8> for lib_pattern in CUDA_RUNTIME_LIB_PATTERNS:
<9> for pth in dir.glob(lib_pattern):
<10> if pth.is_file():
<11> yield pth
<12> except PermissionError:
<13> pass
<14>
|
===========unchanged ref 0===========
at: bitsandbytes.diagnostics.cuda
CUDA_RUNTIME_LIB_PATTERNS = (
"cudart64*.dll", # Windows
"libcudart*.so*", # libcudart.so, libcudart.so.11.0, libcudart.so.12.0, libcudart.so.12.1, libcudart.so.12.2 etc.
"nvcuda*.dll", # Windows
)
logger = logging.getLogger(__name__)
at: logging.Logger
warning(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
exists() -> bool
at: typing
Iterable = _alias(collections.abc.Iterable, 1)
|
bitsandbytes.diagnostics.cuda/is_relevant_candidate_env_var
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
79d1cccc7fb90eaa3d65aebabe99050e343306b3
|
Improve filtering for values that are surely not paths
|
<4>:<add> and env_var not in CUDART_PATH_IGNORED_ENVVARS # not ignored
<5>:<add> and "BASH_FUNC" not in env_var # not a bash function defined via envvar
<add> and "\n" not in value # likely e.g. a script or something?
<del> and env_var not in CUDART_PATH_IGNORED_ENVVARS # not ignored
|
# module: bitsandbytes.diagnostics.cuda
def is_relevant_candidate_env_var(env_var: str, value: str) -> bool:
<0> return (
<1> env_var in CUDART_PATH_PREFERRED_ENVVARS # is a preferred location
<2> or (
<3> os.sep in value # might contain a path
<4> and "CONDA" not in env_var # not another conda envvar
<5> and env_var not in CUDART_PATH_IGNORED_ENVVARS # not ignored
<6> )
<7> )
<8>
|
===========unchanged ref 0===========
at: bitsandbytes.diagnostics.cuda
CUDART_PATH_PREFERRED_ENVVARS = ("CONDA_PREFIX", "LD_LIBRARY_PATH")
===========changed ref 0===========
# module: bitsandbytes.diagnostics.cuda
def find_cuda_libraries_in_path_list(paths_list_candidate: str) -> Iterable[Path]:
for dir_string in paths_list_candidate.split(os.pathsep):
if not dir_string:
continue
+ if os.sep not in dir_string:
+ continue
try:
dir = Path(dir_string)
+ try:
+ if not dir.exists():
- if not dir.exists():
+ logger.warning(f"The directory listed in your path is found to be non-existent: {dir}")
- logger.warning(f"The directory listed in your path is found to be non-existent: {dir}")
+ continue
- continue
+ except OSError: # Assume an esoteric error trying to poke at the directory
+ pass
for lib_pattern in CUDA_RUNTIME_LIB_PATTERNS:
for pth in dir.glob(lib_pattern):
if pth.is_file():
yield pth
except PermissionError:
pass
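Putting the two filters in this record together: only environment variables whose value plausibly contains a path are scanned, and each path-list entry without a separator is skipped before globbing. A hedged sketch of that scan (the constant and function names here are illustrative):

import os

PREFERRED = ("CONDA_PREFIX", "LD_LIBRARY_PATH")

def looks_like_path_candidate(name: str, value: str) -> bool:
    return name in PREFERRED or (
        os.sep in value
        and "CONDA" not in name
        and "BASH_FUNC" not in name   # exported bash functions are not paths
        and "\n" not in value         # multi-line values are scripts, not paths
    )

candidates = {k: v for k, v in os.environ.items() if looks_like_path_candidate(k, v)}
search_dirs = {
    entry
    for value in candidates.values()
    for entry in value.split(os.pathsep)
    if entry and os.sep in entry     # entries without a separator cannot be directories
}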
|
bitsandbytes.functional/get_4bit_type
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
0c64a0d8c377734377fa591226e3b2c65fa8aaf8
|
Merge pull request #1148 from stevhliu/fsdp-qlora
|
# module: bitsandbytes.functional
def get_4bit_type(typename, device=None, blocksize=64):
<0> if device is None:
<1> device = "cuda"
<2> data = None
<3> if typename == "nf4":
<4> """ Implements the NF4 data type.
<5>
<6> Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that
<7> is normalized into the range [-1, 1].
<8>
<9> For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314)
<10>
<11> Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in
<12> the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.
<13> """
<14> data = [
<15> -1.0,
<16> -0.6961928009986877,
<17> -0.5250730514526367,
<18> -0.39491748809814453,
<19> -0.28444138169288635,
<20> -0.18477343022823334,
<21> -0.09105003625154495,
<22> 0.0,
<23> 0.07958029955625534,
<24> 0.16093020141124725,
<25> 0.24611230194568634,
<26> 0.33791524171829224,
<27> 0.44070982933044434,
<28> 0.5626170039176941,
<29> 0.7229568362236023,
<30> 1.0,
<31> ]
<32> elif typename == "fp4":
<33> # 0b000 = 0
<34> # 0b001 = 0.0625
<35> # 0b010 = 8
</s>
|
===========below chunk 0===========
# module: bitsandbytes.functional
def get_4bit_type(typename, device=None, blocksize=64):
# offset: 1
# 0b100 = 4
# 0b101 = 6
# 0b110 = 2
# 0b111 = 3
# can also be created with bnb.functional.create_fp8_map(signed=True, exponent_bits=2, precision_bits=1, total_bits=4)
data = [0, 0.0625, 8.0, 12.0, 4.0, 6.0, 2.0, 3.0, -0, -0.0625, -8.0, -12.0, -4.0, -6.0, -2.0, -3.0]
elif typename == "int4":
data = [7, 6, 5, 4, 3, 2, 1, 0, -0, -1, -2, -3, -4, -5, -6, -7]
elif typename == "af4":
# Taken from: NF4 Isn't Information Theoretically Optimal (and that's Good)
# https://arxiv.org/abs/2306.06965
if blocksize == 64:
data = [
-1.0,
-0.69441008,
-0.51243739,
-0.3736951,
-0.25607552,
-0.14982478,
-0.04934812,
0.0,
0.04273164,
0.12934483,
0.21961274,
0.31675666,
0.42563882,
0.55496234,
0.72424863,
1.0,
][::-1]
else:
raise NotImplementedError("4-bit AbnormalFloats currently only support blocksize 64.")
if data is None:
raise NotImplementedError(f"Typename {typename} not supported")
data = Tensor(</s>
===========below chunk 1===========
# module: bitsandbytes.functional
def get_4bit_type(typename, device=None, blocksize=64):
# offset: 2
<s>
if data is None:
raise NotImplementedError(f"Typename {typename} not supported")
data = Tensor(data)
data /= data.abs().max()
assert data.numel() == 16
return data.to(device)
===========unchanged ref 0===========
at: torch._C._VariableFunctions
tensor(data: Any, dtype: Optional[_dtype]=None, device: Device=None, requires_grad: _bool=False) -> Tensor
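As a worked illustration of how a 4-bit code like the NF4 table above is used: a block is scaled into [-1, 1] by its absmax, each value is mapped to the nearest code entry, and dequantization multiplies back. This is a simplified sketch, not the packed CUDA kernel the library actually runs:

import torch

nf4 = torch.tensor([
    -1.0, -0.6961928009986877, -0.5250730514526367, -0.39491748809814453,
    -0.28444138169288635, -0.18477343022823334, -0.09105003625154495, 0.0,
    0.07958029955625534, 0.16093020141124725, 0.24611230194568634,
    0.33791524171829224, 0.44070982933044434, 0.5626170039176941,
    0.7229568362236023, 1.0,
])

x = torch.randn(64)                      # one quantization block
absmax = x.abs().max()
idx = ((x / absmax).unsqueeze(-1) - nf4).abs().argmin(dim=-1)  # nearest-code index, 0..15
x_deq = nf4[idx] * absmax                # dequantized approximation of x
print((x - x_deq).abs().max())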
|