path            stringlengths   9 - 117
type            stringclasses   2 values
project         stringclasses   10 values
commit_hash     stringlengths   40 - 40
commit_message  stringlengths   1 - 137
ground_truth    stringlengths   0 - 2.74k
main_code       stringlengths   102 - 3.37k
context         stringlengths   0 - 14.7k
tests.test_functional/test_blockwise_cpu_large
Modified
bitsandbytes-foundation~bitsandbytes
df86625a9399d16d6fb2e3bab6bb7bcc729f3b7d
Isolated CUDASetup logging; all tests green.
<4>:<add> for hidden in [128]:#, 14336]: <del> for hidden in [128, 14336]:
# module: tests.test_functional def test_blockwise_cpu_large(): <0> diffs = [] <1> reldiffs = [] <2> batch = 128 <3> seq = 128 <4> for hidden in [128, 14336]: <5> for blocksize in [4096, 16384]: <6> for i in range(2): <7> A1 = torch.randn(batch, seq, hidden, device='cpu') <8> t0 = time.time() <9> C, S = F.quantize_blockwise(A1, blocksize=blocksize) <10> A2 = F.dequantize_blockwise(C, S, blocksize=blocksize) <11> print(time.time() - t0) <12> diff = torch.abs(A1 - A2) <13> reldiff = diff / torch.abs(A1 + 1e-8) <14> diffs.append(diff.mean().item()) <15> reldiffs.append(reldiff.mean().item()) <16> assert diffs[-1] < 0.011 <17>
===========changed ref 0=========== # module: tests.test_functional def test_matmuls(): + a = torch.randn(256, 512).half().cuda() - a = torch.randn(256, 256).half().cuda() + b = torch.randn(256, 512).half().cuda() - b = torch.randn(256, 256).half().cuda() + c1 = torch.matmul(a, b.t()) - c1 = torch.matmul(a, b) c2 = bnb.matmul(a, b) + c3 = bnb.matmul_cublas(a, b.t()) - c3 = bnb.matmul(a, b) err1 = torch.abs(c1 - c2).mean().item() err2 = torch.abs(c1 - c3).mean().item() assert err1 < 0.2 assert err2 < 0.2 + print(err1, err2) ===========changed ref 1=========== # module: tests.test_functional - @pytest.mark.parametrize( - "dim1, dim2, dtype, orderA, orderOut", values, ids=names - ) - def test_transform_to_row(dim1, dim2, dtype, orderA, orderOut): - for i in range(1): - A = torch.randint(-127, 127, size=(dim1, dim2), device="cuda").to(dtype) - - out2, S2 = F.transform(A, to_order=orderA) - A2, S3 = F.transform(out2, from_order=orderA, to_order="row", state=S2) - assert A2.shape[0] == A.shape[0] - assert A2.shape[1] == A.shape[1] - - print("") - print(A) - print(out2) - print(A2) - ===========changed ref 2=========== # module: bitsandbytes.cextension + class CUDASetup(object): + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDALibrary_Singleton(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cextension + class CUDASetup(object): + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDALibrary_Singleton(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cextension + class CUDASetup(object): + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 7=========== # module: bitsandbytes.cextension + class CUDASetup(object): + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDALibrary_Singleton(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 9=========== # module: bitsandbytes.cextension + class CUDASetup(object): + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 10=========== # module: tests.test_cuda_setup_evaluator - @pytest.mark.parametrize("test_input, expected", HAPPY_PATH__LD_LIB_TEST_PATHS) - def test_determine_cuda_runtime_lib_path__happy_path( - tmp_path, test_input: str, expected: str - ): - for path in extract_candidate_paths(test_input): - path.mkdir() - (path / CUDA_RUNTIME_LIB).touch() - assert determine_cuda_runtime_lib_path(test_input) == expected - ===========changed ref 11=========== # module: tests.test_functional def test_spmm_bench(): batch = 2 model = 1024 * 1 hidden = model * 4 seq = 1024 dim1 = batch * seq dim2 = model dim3 = hidden threshold = 4 A = torch.randn(dim1, dim2, device="cuda").half() B = torch.randn(dim2, dim3, device="cuda").half() for i in range(10): + C1 = bnb.matmul(A, B.t()) - C1 = bnb.matmul(A, B) torch.cuda.synchronize() t0 = time.time() for i in range(k): + C1 = bnb.matmul(A, B.t()) - C1 = 
bnb.matmul(A, B) torch.cuda.synchronize() t8 = time.time() - t0 idx = torch.abs(A) >= threshold nnz = (idx == 1).sum().item() print(nnz / idx.numel()) rows, cols = torch.where(idx) values = A[idx] cooA = F.COOSparseTensor( A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values ) for i in range(10): out2 = F.spmm_coo(cooA, B) torch.cuda.synchronize() t0 = time.time() for i in range(k): out2 = F.spmm_coo(cooA, B) torch.cuda.synchronize() tsp = time.time() - t0 print(tsp, t8) print(tsp / t8) ===========changed ref 12=========== # module: bitsandbytes.nn.modules - class Linear8bit(nn.Linear): - def __init__( - self, - input_features, - output_features, - bias=True, - quant_type="vector", - index=None, - args=None, - sparse_decomp=False, - ): - super(Linear8bit, self).__init__(input_features, output_features, bias) - self.quant_type = quant_type - self.index = index - self.args = args - self.iter = 0 - ===========changed ref 13=========== # module: tests.test_cuda_setup_evaluator - @pytest.mark.parametrize("test_input", UNHAPPY_PATH__LD_LIB_TEST_PATHS) - def test_determine_cuda_runtime_lib_path__unhappy_path(tmp_path, test_input: str): - test_input = tmp_path / test_input - (test_input / CUDA_RUNTIME_LIB).touch() - with pytest.raises(FileNotFoundError) as err_info: - determine_cuda_runtime_lib_path(test_input) - assert all(match in err_info for match in {"duplicate", CUDA_RUNTIME_LIB}) -
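The test above checks that a quantize/dequantize round trip stays within a mean absolute error of roughly 0.011. As a rough illustration of the idea only (bitsandbytes' 8-bit quantization uses a non-linear code, not the linear scaling shown here), a pure-PyTorch sketch of blockwise absmax quantization might look like this; blockwise_absmax_quantize and blockwise_absmax_dequantize are hypothetical helpers, not library functions:

import torch

def blockwise_absmax_quantize(x, blocksize=4096):
    # Illustrative only: scale each block of `blocksize` values by its absmax
    # and round to int8. The real library uses a non-linear 8-bit code.
    flat = x.flatten()
    n = flat.numel()
    pad = (-n) % blocksize
    flat = torch.nn.functional.pad(flat, (0, pad))
    blocks = flat.view(-1, blocksize)
    absmax = blocks.abs().amax(dim=1, keepdim=True).clamp(min=1e-12)
    q = torch.round(blocks / absmax * 127).to(torch.int8)
    return q, absmax, n

def blockwise_absmax_dequantize(q, absmax, n):
    return (q.to(torch.float32) / 127 * absmax).flatten()[:n]

A1 = torch.randn(128, 128, 128)
q, absmax, n = blockwise_absmax_quantize(A1)
A2 = blockwise_absmax_dequantize(q, absmax, n).view_as(A1)
print(torch.abs(A1 - A2).mean().item())  # analogous to the test's diffs[-1]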
bitsandbytes.cextension/CUDASetup.initialize
Modified
bitsandbytes-foundation~bitsandbytes
a371be302ddbdf3f36acef1a6fe365672099c9d9
Added CUDA SETUP instruction generator.
<1>:<add> self.lib = None <3>:<add> binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup() <add> self.cudart_path = cudart_path <add> self.cuda = cuda <add> self.cc = cc <add> self.cuda_version_string = cuda_version_string <add> <del> binary_name = evaluate_cuda_setup() <9>:<add> self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?") <del> self.add_log_entry(f"CUDA SETUP: TODO: compile library for specific version: {binary_name}") <14>:<add> self.add_log_entry('') <add> self.add_log_entry('='*48 + 'ERROR' + '='*37) <add> self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:') <add> self.add_log_entry('1. CUDA driver not installed') <add> self.add_log_entry('2. CUDA not installed') <add> self.add_log_entry('3. You
# module: bitsandbytes.cextension class CUDASetup(object): def initialize(self): <0> self.cuda_setup_log = [] <1> <2> from .cuda_setup.main import evaluate_cuda_setup <3> binary_name = evaluate_cuda_setup() <4> package_dir = Path(__file__).parent <5> binary_path = package_dir / binary_name <6> <7> try: <8> if not binary_path.exists(): <9> self.add_log_entry(f"CUDA SETUP: TODO: compile library for specific version: {binary_name}") <10> legacy_binary_name = "libbitsandbytes.so" <11> self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...") <12> binary_path = package_dir / legacy_binary_name <13> if not binary_path.exists(): <14> self.add_log_entry('CUDA SETUP: CUDA detection failed. Either CUDA driver not installed, CUDA not installed, or you have multiple conflicting CUDA libraries!') <15> self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.') <16> self.print_log_stack() <17> raise Exception('CUDA SETUP: Setup Failed!') <18> self.lib = ct.cdll.LoadLibrary(binary_path) <19> else: <20> self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...") <21> self.lib = ct.cdll.LoadLibrary(binary_path) <22> except: <23> self.print_log_stack() <24>
===========unchanged ref 0=========== at: bitsandbytes.cextension.CUDASetup _instance = None add_log_entry(msg, is_warning=False) at: bitsandbytes.cextension.CUDASetup.initialize self.cudart_path = cudart_path self.cuda = cuda self.cuda_version_string = cuda_version_string
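The CUDASetup class shown in the changed refs follows a lazy singleton pattern: __init__ is blocked, and get_instance() builds the single object once via __new__ plus initialize(). A minimal, self-contained sketch of that pattern with generic names (LazySingleton is not the library class):

class LazySingleton:
    _instance = None

    def __init__(self):
        # Direct construction is forbidden, mirroring CUDASetup.__init__.
        raise RuntimeError("Call get_instance() instead")

    def initialize(self):
        # Runs exactly once, when the first caller asks for the instance.
        self.log = []
        self.lib = None

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls.__new__(cls)  # bypasses __init__ deliberately
            cls._instance.initialize()
        return cls._instance

setup = LazySingleton.get_instance()
assert setup is LazySingleton.get_instance()  # every caller sees the same object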
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
Modified
bitsandbytes-foundation~bitsandbytes
a371be302ddbdf3f36acef1a6fe365672099c9d9
Added CUDA SETUP instruction generator.
# module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): <0> # we remove this for now and see how things go <1> #print('') <2> #print('='*35 + 'BUG REPORT' + '='*35) <3> #print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') <4> #print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') <5> #print('='*80) <6> #if not torch.cuda.is_available(): <7> #print('No GPU detected. Loading CPU library...') <8> #return binary_name <9> <10> binary_name = "libbitsandbytes_cpu.so" <11> <12> cuda_setup = CUDASetup.get_instance() <13> cudart_path = determine_cuda_runtime_lib_path() <14> if cudart_path is None: <15> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True) <16> return binary_name <17> <18> cuda_setup.add_log_entry((f"CUDA SETUP: CUDA runtime path found: {cudart_path}")) <19> cuda = get_cuda_lib_handle() <20> cc = get_compute_capability(cuda) <21> cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}") <22> cuda_version_string = get_cuda_version(cuda, cudart_path) <23> <24> <25> if cc == '': <26> cuda_setup.add_log_entry("WARNING: No</s>
===========below chunk 0=========== # module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): # offset: 1 return binary_name # 7.5 is the minimum CC vor cublaslt has_cublaslt = cc in ["7.5", "8.0", "8.6"] # TODO: # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible) # (2) Multiple CUDA versions installed # we use ls -l instead of nvcc to determine the cuda version # since most installations will have the libcudart.so installed, but not the compiler cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}') def get_binary_name(): "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so" bin_base_name = "libbitsandbytes_cuda" if has_cublaslt: return f"{bin_base_name}{cuda_version_string}.so" else: return f"{bin_base_name}{cuda_version_string}_nocublaslt.so" binary_name = get_binary_name() return binary_name ===========unchanged ref 0=========== at: bitsandbytes.cextension CUDASetup() at: bitsandbytes.cextension.CUDASetup _instance = None get_instance() at: bitsandbytes.cuda_setup.main get_cuda_version(cuda, cudart_path) get_cuda_lib_handle() get_compute_capability(cuda) at: bitsandbytes.cuda_setup.paths determine_cuda_runtime_lib_path() -> Union[Path, None] ===========changed ref 0=========== # module: bitsandbytes.cextension lib = CUDASetup.get_instance().lib try: + if lib is None and torch.cuda.is_available(): + CUDASetup.get_instance().generate_instructions() + CUDASetup.get_instance().print_log_stack() + raise RuntimeError(''' + CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs to fix your environment! + If you cannot find any issues and suspect a bug, please open an issue with detals about your environment: + https://github.com/TimDettmers/bitsandbytes/issues''') lib.cadam32bit_g32 lib.get_context.restype = ct.c_void_p lib.get_cusparse.restype = ct.c_void_p COMPILED_WITH_CUDA = True except AttributeError: warn( "The installed version of bitsandbytes was compiled without GPU support. " "8-bit optimizers and GPU quantization are unavailable." ) COMPILED_WITH_CUDA = False ===========changed ref 1=========== # module: bitsandbytes.cextension class CUDASetup(object): def initialize(self): self.cuda_setup_log = [] + self.lib = None from .cuda_setup.main import evaluate_cuda_setup + binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup() + self.cudart_path = cudart_path + self.cuda = cuda + self.cc = cc + self.cuda_version_string = cuda_version_string + - binary_name = evaluate_cuda_setup() package_dir = Path(__file__).parent binary_path = package_dir / binary_name try: if not binary_path.exists(): + self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?") - self.add_log_entry(f"CUDA SETUP: TODO: compile library for specific version: {binary_name}") legacy_binary_name = "libbitsandbytes.so" self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...") binary_path = package_dir / legacy_binary_name if not binary_path.exists(): + self.add_log_entry('') + self.add_log_entry('='*48 + 'ERROR' + '='*37) + self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:') + self.add_log_entry('1. CUDA driver not installed') + self.add_log_entry('2. CUDA not installed') + self.add_log_entry('3. You have multiple conflicting CUDA libraries') + self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!') - self.add_log_entry('CUDA SETUP: CUDA detection failed. 
Either CUDA driver</s> ===========changed ref 2=========== # module: bitsandbytes.cextension class CUDASetup(object): def initialize(self): # offset: 1 <s>andbytes release!') - self.add_log_entry('CUDA SETUP: CUDA detection failed. Either CUDA driver not installed, CUDA not installed, or you have multiple conflicting CUDA libraries!') self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.') + self.add_log_entry('='*80) + self.add_log_entry('') + self.generate_instructions() self.print_log_stack() raise Exception('CUDA SETUP: Setup Failed!') self.lib = ct.cdll.LoadLibrary(binary_path) else: self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...") self.lib = ct.cdll.LoadLibrary(binary_path) except: self.print_log_stack()
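evaluate_cuda_setup() ultimately boils down to choosing a shared-library file name from the detected CUDA version and compute capability. A condensed, hypothetical helper that mirrors the get_binary_name() logic shown above (pick_binary_name is not part of the library):

def pick_binary_name(cc, cuda_version_string):
    # cuBLASLt kernels require compute capability >= 7.5; otherwise the
    # _nocublaslt build is selected.
    has_cublaslt = cc in ["7.5", "8.0", "8.6"]
    suffix = "" if has_cublaslt else "_nocublaslt"
    return f"libbitsandbytes_cuda{cuda_version_string}{suffix}.so"

print(pick_binary_name("8.0", "113"))  # libbitsandbytes_cuda113.so
print(pick_binary_name("6.1", "102"))  # libbitsandbytes_cuda102_nocublaslt.so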
tests.test_cuda_setup_evaluator/test_full_system
Modified
bitsandbytes-foundation~bitsandbytes
a371be302ddbdf3f36acef1a6fe365672099c9d9
Added CUDA SETUP instruction generator.
<6>:<del> ls_output, err = bnb.utils.execute_and_return( <7>:<del> f'ls -l {os.environ["CONDA_PREFIX"]}/lib/libcudart.so' <8>:<del> ) <9>:<del> major, minor, revision = ( <10>:<add> ls_output, err = bnb.utils.execute_and_return(f'ls -l {os.environ["CONDA_PREFIX"]}/lib/libcudart.so') <add> major, minor, revision = (ls_output.split(" ")[-1].replace("libcudart.so.", "").split(".")) <del> ls_output.split(" ")[-1].replace("libcudart.so.", "").split(".") <11>:<del> ) <27>:<add> binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup() <del> binary_name = evaluate_cuda_setup()
# module: tests.test_cuda_setup_evaluator def test_full_system(): <0> ## this only tests the cuda version and not compute capability <1> <2> # if CONDA_PREFIX exists, it has priority before all other env variables <3> # but it does not contain the library directly, so we need to look at the a sub-folder <4> version = "" <5> if "CONDA_PREFIX" in os.environ: <6> ls_output, err = bnb.utils.execute_and_return( <7> f'ls -l {os.environ["CONDA_PREFIX"]}/lib/libcudart.so' <8> ) <9> major, minor, revision = ( <10> ls_output.split(" ")[-1].replace("libcudart.so.", "").split(".") <11> ) <12> version = float(f"{major}.{minor}") <13> <14> if version == "" and "LD_LIBRARY_PATH" in os.environ: <15> ld_path = os.environ["LD_LIBRARY_PATH"] <16> paths = ld_path.split(":") <17> version = "" <18> for p in paths: <19> if "cuda" in p: <20> idx = p.rfind("cuda-") <21> version = p[idx + 5 : idx + 5 + 4].replace("/", "") <22> version = float(version) <23> break <24> <25> <26> assert version > 0 <27> binary_name = evaluate_cuda_setup() <28> binary_name = binary_name.replace("libbitsandbytes_cuda", "") <29> assert binary_name.startswith(str(version).replace(".", "")) <30>
===========unchanged ref 0=========== at: bitsandbytes.cuda_setup.main evaluate_cuda_setup() at: bitsandbytes.utils execute_and_return(command_string: str) -> Tuple[str, str] at: os environ = _createenviron() ===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): # we remove this for now and see how things go #print('') #print('='*35 + 'BUG REPORT' + '='*35) #print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') #print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') #print('='*80) #if not torch.cuda.is_available(): #print('No GPU detected. Loading CPU library...') #return binary_name binary_name = "libbitsandbytes_cpu.so" cuda_setup = CUDASetup.get_instance() cudart_path = determine_cuda_runtime_lib_path() if cudart_path is None: cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True) return binary_name cuda_setup.add_log_entry((f"CUDA SETUP: CUDA runtime path found: {cudart_path}")) cuda = get_cuda_lib_handle() cc = get_compute_capability(cuda) cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}") cuda_version_string = get_cuda_version(cuda, cudart_path) if cc == '': cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Processing to load CPU-only library...", is_warning=True) return</s> ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): # offset: 1 <s>WARNING: No GPU detected! Check your CUDA paths. Processing to load CPU-only library...", is_warning=True) return binary_name # 7.5 is the minimum CC vor cublaslt has_cublaslt = cc in ["7.5", "8.0", "8.6"] # TODO: # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible) # (2) Multiple CUDA versions installed # we use ls -l instead of nvcc to determine the cuda version # since most installations will have the libcudart.so installed, but not the compiler cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}') def get_binary_name(): "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so" bin_base_name = "libbitsandbytes_cuda" if has_cublaslt: return f"{bin_base_name}{cuda_version_string}.so" else: return f"{bin_base_name}{cuda_version_string}_nocublaslt.so" binary_name = get_binary_name() + return binary_name, cudart_path, cuda, cc, cuda_version_string - return binary_name ===========changed ref 2=========== # module: bitsandbytes.cextension lib = CUDASetup.get_instance().lib try: + if lib is None and torch.cuda.is_available(): + CUDASetup.get_instance().generate_instructions() + CUDASetup.get_instance().print_log_stack() + raise RuntimeError(''' + CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs to fix your environment! + If you cannot find any issues and suspect a bug, please open an issue with detals about your environment: + https://github.com/TimDettmers/bitsandbytes/issues''') lib.cadam32bit_g32 lib.get_context.restype = ct.c_void_p lib.get_cusparse.restype = ct.c_void_p COMPILED_WITH_CUDA = True except AttributeError: warn( "The installed version of bitsandbytes was compiled without GPU support. " "8-bit optimizers and GPU quantization are unavailable." 
) COMPILED_WITH_CUDA = False ===========changed ref 3=========== # module: bitsandbytes.cextension class CUDASetup(object): def initialize(self): self.cuda_setup_log = [] + self.lib = None from .cuda_setup.main import evaluate_cuda_setup + binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup() + self.cudart_path = cudart_path + self.cuda = cuda + self.cc = cc + self.cuda_version_string = cuda_version_string + - binary_name = evaluate_cuda_setup() package_dir = Path(__file__).parent binary_path = package_dir / binary_name try: if not binary_path.exists(): + self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?") - self.add_log_entry(f"CUDA SETUP: TODO: compile library for specific version: {binary_name}") legacy_binary_name = "libbitsandbytes.so" self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...") binary_path = package_dir / legacy_binary_name if not binary_path.exists(): + self.add_log_entry('') + self.add_log_entry('='*48 + 'ERROR' + '='*37) + self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:') + self.add_log_entry('1. CUDA driver not installed') + self.add_log_entry('2. CUDA not installed') + self.add_log_entry('3. You have multiple conflicting CUDA libraries') + self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!') - self.add_log_entry('CUDA SETUP: CUDA detection failed. Either CUDA driver</s>
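test_full_system() derives the expected CUDA version either from the conda environment's libcudart.so symlink or from an LD_LIBRARY_PATH entry such as /usr/local/cuda-11.3/lib64. A small sketch of that fallback parse, using a regex instead of the test's string slicing (version_from_ld_library_path is a hypothetical helper):

import os
import re

def version_from_ld_library_path():
    # Look for a "cuda-<major>.<minor>" component in any LD_LIBRARY_PATH entry.
    for entry in os.environ.get("LD_LIBRARY_PATH", "").split(":"):
        match = re.search(r"cuda-(\d+\.\d+)", entry)
        if match:
            return float(match.group(1))
    return None

print(version_from_ld_library_path())  # e.g. 11.3, or None if no cuda-* path is set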
bitsandbytes.cuda_setup.paths/warn_in_case_of_duplicates
Modified
bitsandbytes-foundation~bitsandbytes
c584482f1f13e073dac714815f2d439fd66699d1
Resolve cases of CUDASetup.get_instance not being called when used
<8>:<add> CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True) <del> CUDASetup.get_instance.add_log_entry(warning_msg, is_warning=True)
# module: bitsandbytes.cuda_setup.paths def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None: <0> if len(results_paths) > 1: <1> warning_msg = ( <2> f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. " <3> "We'll flip a coin and try one of these, in order to fail forward.\n" <4> "Either way, this might cause trouble in the future:\n" <5> "If you get `CUDA error: invalid device function` errors, the above " <6> "might be the cause and the solution is to make sure only one " <7> f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env.") <8> CUDASetup.get_instance.add_log_entry(warning_msg, is_warning=True) <9>
===========unchanged ref 0=========== at: bitsandbytes.cextension CUDASetup() at: bitsandbytes.cextension.CUDASetup _instance = None get_instance() at: bitsandbytes.cuda_setup.paths CUDA_RUNTIME_LIB: str = "libcudart.so" at: pathlib Path() at: typing Set = _alias(set, 1, inst=False, name='Set')
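The commit fixes calls of the form CUDASetup.get_instance.add_log_entry(...), which look up add_log_entry on the classmethod object itself rather than on the singleton and therefore raise AttributeError. A tiny reproduction with a stand-in class (Demo is hypothetical):

class Demo:
    _instance = None

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls.__new__(cls)
        return cls._instance

    def add_log_entry(self, msg):
        print(msg)

Demo.get_instance().add_log_entry("ok")        # correct: call first, then log
try:
    Demo.get_instance.add_log_entry("broken")  # missing (): no such attribute on the method object
except AttributeError as err:
    print(err)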
bitsandbytes.cuda_setup.paths/determine_cuda_runtime_lib_path
Modified
bitsandbytes-foundation~bitsandbytes
c584482f1f13e073dac714815f2d439fd66699d1
Resolve cases of CUDASetup.get_instance not being called when used
<22>:<add> CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain ' <del> CUDASetup.get_instance.add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
# module: bitsandbytes.cuda_setup.paths def determine_cuda_runtime_lib_path() -> Union[Path, None]: <0> """ <1> Searches for a cuda installations, in the following order of priority: <2> 1. active conda env <3> 2. LD_LIBRARY_PATH <4> 3. any other env vars, while ignoring those that <5> - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`) <6> - don't contain the path separator `/` <7> <8> If multiple libraries are found in part 3, we optimistically try one, <9> while giving a warning message. <10> """ <11> candidate_env_vars = get_potentially_lib_path_containing_env_vars() <12> <13> if "CONDA_PREFIX" in candidate_env_vars: <14> conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib" <15> <16> conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path)) <17> warn_in_case_of_duplicates(conda_cuda_libs) <18> <19> if conda_cuda_libs: <20> return next(iter(conda_cuda_libs)) <21> <22> CUDASetup.get_instance.add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain ' <23> f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True) <24> <25> if "LD_LIBRARY_PATH" in candidate_env_vars: <26> lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"]) <27> <28> if lib_ld_cuda_libs: <29> return next(iter(lib_ld_cuda_libs)) <30> warn_in_case_of_duplicates(lib_ld_cuda_libs) <31> <32> CUDASetup.get_instance().add_</s>
===========below chunk 0=========== # module: bitsandbytes.cuda_setup.paths def determine_cuda_runtime_lib_path() -> Union[Path, None]: # offset: 1 f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True) remaining_candidate_env_vars = { env_var: value for env_var, value in candidate_env_vars.items() if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"} } cuda_runtime_libs = set() for env_var, value in remaining_candidate_env_vars.items(): cuda_runtime_libs.update(find_cuda_lib_in(value)) if len(cuda_runtime_libs) == 0: CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching /usr/local/cuda/lib64...') cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64')) warn_in_case_of_duplicates(cuda_runtime_libs) return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None ===========unchanged ref 0=========== at: bitsandbytes.cextension CUDASetup() at: bitsandbytes.cextension.CUDASetup get_instance() at: bitsandbytes.cuda_setup.env_vars get_potentially_lib_path_containing_env_vars() -> Dict[str, str] at: bitsandbytes.cuda_setup.paths CUDA_RUNTIME_LIB: str = "libcudart.so" find_cuda_lib_in(paths_list_candidate: str) -> Set[Path] warn_in_case_of_duplicates(results_paths: Set[Path]) -> None at: pathlib Path() ===========changed ref 0=========== # module: bitsandbytes.cuda_setup.paths def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None: if len(results_paths) > 1: warning_msg = ( f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. " "We'll flip a coin and try one of these, in order to fail forward.\n" "Either way, this might cause trouble in the future:\n" "If you get `CUDA error: invalid device function` errors, the above " "might be the cause and the solution is to make sure only one " f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env.") + CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True) - CUDASetup.get_instance.add_log_entry(warning_msg, is_warning=True)
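determine_cuda_runtime_lib_path() walks a fixed priority list of environment variables and returns the first location that actually contains libcudart.so. A stripped-down sketch of the same search order, omitting the "any other env vars" step and using a simplified stand-in for find_cuda_lib_in (both helpers here are hypothetical):

import os
from pathlib import Path

CUDA_RUNTIME_LIB = "libcudart.so"

def find_cudart_in(paths_candidate):
    # Check every ':'-separated directory for a libcudart.so file.
    return {Path(p) / CUDA_RUNTIME_LIB
            for p in paths_candidate.split(":")
            if p and (Path(p) / CUDA_RUNTIME_LIB).exists()}

def locate_cudart():
    conda = os.environ.get("CONDA_PREFIX")
    if conda:
        hits = find_cudart_in(str(Path(conda) / "lib"))
        if hits:
            return next(iter(hits))
    hits = find_cudart_in(os.environ.get("LD_LIBRARY_PATH", ""))
    if hits:
        return next(iter(hits))
    # Last resort, as in the original function: the conventional system location.
    hits = find_cudart_in("/usr/local/cuda/lib64")
    return next(iter(hits)) if hits else None

print(locate_cudart())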
bitsandbytes.cuda_setup.main/check_cuda_result
Modified
bitsandbytes-foundation~bitsandbytes
c584482f1f13e073dac714815f2d439fd66699d1
Resolve cases of CUDASetup.get_instance not being called when used
<4>:<add> CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") <del> CUDASetup.get_instance.add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
# module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): <0> # 3. Check for CUDA errors <1> if result_val != 0: <2> error_str = ctypes.c_char_p() <3> cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) <4> CUDASetup.get_instance.add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") <5>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.paths def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None: if len(results_paths) > 1: warning_msg = ( f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. " "We'll flip a coin and try one of these, in order to fail forward.\n" "Either way, this might cause trouble in the future:\n" "If you get `CUDA error: invalid device function` errors, the above " "might be the cause and the solution is to make sure only one " f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env.") + CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True) - CUDASetup.get_instance.add_log_entry(warning_msg, is_warning=True) ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.paths def determine_cuda_runtime_lib_path() -> Union[Path, None]: """ Searches for a cuda installations, in the following order of priority: 1. active conda env 2. LD_LIBRARY_PATH 3. any other env vars, while ignoring those that - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`) - don't contain the path separator `/` If multiple libraries are found in part 3, we optimistically try one, while giving a warning message. """ candidate_env_vars = get_potentially_lib_path_containing_env_vars() if "CONDA_PREFIX" in candidate_env_vars: conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib" conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path)) warn_in_case_of_duplicates(conda_cuda_libs) if conda_cuda_libs: return next(iter(conda_cuda_libs)) + CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain ' - CUDASetup.get_instance.add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain ' f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True) if "LD_LIBRARY_PATH" in candidate_env_vars: lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"]) if lib_ld_cuda_libs: return next(iter(lib_ld_cuda_libs)) warn_in_case_of_duplicates(lib_ld_cuda_libs) CUDASetup.get_instance</s> ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.paths def determine_cuda_runtime_lib_path() -> Union[Path, None]: # offset: 1 <s> warn_in_case_of_duplicates(lib_ld_cuda_libs) CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain ' f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True) remaining_candidate_env_vars = { env_var: value for env_var, value in candidate_env_vars.items() if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"} } cuda_runtime_libs = set() for env_var, value in remaining_candidate_env_vars.items(): cuda_runtime_libs.update(find_cuda_lib_in(value)) if len(cuda_runtime_libs) == 0: CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching /usr/local/cuda/lib64...') cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64')) warn_in_case_of_duplicates(cuda_runtime_libs) return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
bitsandbytes.cuda_setup.main/get_cuda_version
Modified
bitsandbytes-foundation~bitsandbytes
c584482f1f13e073dac714815f2d439fd66699d1
Resolve cases of CUDASetup.get_instance not being called when used
<4>:<add> CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') <del> CUDASetup.get_instance.add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
# module: bitsandbytes.cuda_setup.main def get_cuda_version(cuda, cudart_path): <0> # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION <1> try: <2> cudart = ctypes.CDLL(cudart_path) <3> except OSError: <4> CUDASetup.get_instance.add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') <5> return None <6> <7> version = ctypes.c_int() <8> check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) <9> version = int(version.value) <10> major = version//1000 <11> minor = (version-(major*1000))//10 <12> <13> if major < 11: <14> CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') <15> <16> return f'{major}{minor}' <17>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): # 3. Check for CUDA errors if result_val != 0: error_str = ctypes.c_char_p() cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) + CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") - CUDASetup.get_instance.add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.paths def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None: if len(results_paths) > 1: warning_msg = ( f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. " "We'll flip a coin and try one of these, in order to fail forward.\n" "Either way, this might cause trouble in the future:\n" "If you get `CUDA error: invalid device function` errors, the above " "might be the cause and the solution is to make sure only one " f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env.") + CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True) - CUDASetup.get_instance.add_log_entry(warning_msg, is_warning=True) ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.paths def determine_cuda_runtime_lib_path() -> Union[Path, None]: """ Searches for a cuda installations, in the following order of priority: 1. active conda env 2. LD_LIBRARY_PATH 3. any other env vars, while ignoring those that - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`) - don't contain the path separator `/` If multiple libraries are found in part 3, we optimistically try one, while giving a warning message. """ candidate_env_vars = get_potentially_lib_path_containing_env_vars() if "CONDA_PREFIX" in candidate_env_vars: conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib" conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path)) warn_in_case_of_duplicates(conda_cuda_libs) if conda_cuda_libs: return next(iter(conda_cuda_libs)) + CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain ' - CUDASetup.get_instance.add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain ' f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True) if "LD_LIBRARY_PATH" in candidate_env_vars: lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"]) if lib_ld_cuda_libs: return next(iter(lib_ld_cuda_libs)) warn_in_case_of_duplicates(lib_ld_cuda_libs) CUDASetup.get_instance</s> ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.paths def determine_cuda_runtime_lib_path() -> Union[Path, None]: # offset: 1 <s> warn_in_case_of_duplicates(lib_ld_cuda_libs) CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain ' f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...', is_warning=True) remaining_candidate_env_vars = { env_var: value for env_var, value in candidate_env_vars.items() if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"} } cuda_runtime_libs = set() for env_var, value in remaining_candidate_env_vars.items(): cuda_runtime_libs.update(find_cuda_lib_in(value)) if len(cuda_runtime_libs) == 0: CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. 
Searching /usr/local/cuda/lib64...') cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64')) warn_in_case_of_duplicates(cuda_runtime_libs) return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
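The major/minor arithmetic in get_cuda_version() relies on cudaRuntimeGetVersion() encoding the version as 1000*major + 10*minor. A worked example of how 11030 becomes the "113" suffix used in the binary name:

# cudaRuntimeGetVersion() reports e.g. 11030 for CUDA 11.3.
version = 11030
major = version // 1000                 # 11
minor = (version - major * 1000) // 10  # 3
print(f"{major}{minor}")                # "113" -> libbitsandbytes_cuda113.so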
bitsandbytes.cuda_setup.main/get_cuda_lib_handle
Modified
bitsandbytes-foundation~bitsandbytes
4faf6cb7e983a7ceb32b6329a597e26748dc3977
Replace seemingly incorrect use of CUDA_RUNTIME_LIB
<4>:<add> CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') <del> CUDA_RUNTIME_LIB.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
# module: bitsandbytes.cuda_setup.main def get_cuda_lib_handle(): <0> # 1. find libcuda.so library (GPU driver) (/usr/lib) <1> try: <2> cuda = ctypes.CDLL("libcuda.so") <3> except OSError: <4> CUDA_RUNTIME_LIB.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') <5> return None <6> check_cuda_result(cuda, cuda.cuInit(0)) <7> <8> return cuda <9>
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
Modified
bitsandbytes-foundation~bitsandbytes
4844aef4ff51b433d549a9acc49b11a925b6f4fe
Fixing bad error when GPU was not detected for #73.
<25>:<add> if cc == '' or cc is None: <del> if cc == '':
# module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): <0> # we remove this for now and see how things go <1> #print('') <2> #print('='*35 + 'BUG REPORT' + '='*35) <3> #print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') <4> #print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') <5> #print('='*80) <6> #if not torch.cuda.is_available(): <7> #print('No GPU detected. Loading CPU library...') <8> #return binary_name <9> <10> binary_name = "libbitsandbytes_cpu.so" <11> <12> cuda_setup = CUDASetup.get_instance() <13> cudart_path = determine_cuda_runtime_lib_path() <14> if cudart_path is None: <15> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True) <16> return binary_name <17> <18> cuda_setup.add_log_entry((f"CUDA SETUP: CUDA runtime path found: {cudart_path}")) <19> cuda = get_cuda_lib_handle() <20> cc = get_compute_capability(cuda) <21> cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}") <22> cuda_version_string = get_cuda_version(cuda, cudart_path) <23> <24> <25> if cc == '': <26> cuda_setup.add_log_entry("WARNING: No</s>
===========below chunk 0=========== # module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): # offset: 1 return binary_name # 7.5 is the minimum CC vor cublaslt has_cublaslt = cc in ["7.5", "8.0", "8.6"] # TODO: # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible) # (2) Multiple CUDA versions installed # we use ls -l instead of nvcc to determine the cuda version # since most installations will have the libcudart.so installed, but not the compiler cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}') def get_binary_name(): "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so" bin_base_name = "libbitsandbytes_cuda" if has_cublaslt: return f"{bin_base_name}{cuda_version_string}.so" else: return f"{bin_base_name}{cuda_version_string}_nocublaslt.so" binary_name = get_binary_name() return binary_name, cudart_path, cuda, cc, cuda_version_string ===========unchanged ref 0=========== at: bitsandbytes.cextension CUDASetup() at: bitsandbytes.cextension.CUDASetup _instance = None get_instance() at: bitsandbytes.cuda_setup.main get_cuda_version(cuda, cudart_path) get_cuda_lib_handle() get_compute_capability(cuda) at: bitsandbytes.cuda_setup.paths determine_cuda_runtime_lib_path() -> Union[Path, None]
bitsandbytes.cuda_setup.main/get_cuda_version
Modified
bitsandbytes-foundation~bitsandbytes
8d87c0b85214c07756b5dcdb09ceb26b0bb1cb7a
Fixed CUDA setup bugs, including #81.
<0>:<add> if cuda is None: return None <add> <del> # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
# module: bitsandbytes.cuda_setup.main + # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION def get_cuda_version(cuda, cudart_path): <0> # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION <1> try: <2> cudart = ctypes.CDLL(cudart_path) <3> except OSError: <4> CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') <5> return None <6> <7> version = ctypes.c_int() <8> check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) <9> version = int(version.value) <10> major = version//1000 <11> minor = (version-(major*1000))//10 <12> <13> if major < 11: <14> CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') <15> <16> return f'{major}{minor}' <17>
===========changed ref 0=========== # module: bitsandbytes.cextension lib = CUDASetup.get_instance().lib try: if lib is None and torch.cuda.is_available(): CUDASetup.get_instance().generate_instructions() CUDASetup.get_instance().print_log_stack() raise RuntimeError(''' + CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs aboveto fix your environment! - CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs to fix your environment! If you cannot find any issues and suspect a bug, please open an issue with detals about your environment: https://github.com/TimDettmers/bitsandbytes/issues''') lib.cadam32bit_g32 lib.get_context.restype = ct.c_void_p lib.get_cusparse.restype = ct.c_void_p COMPILED_WITH_CUDA = True except AttributeError: - warn( + warn("The installed version of bitsandbytes was compiled without GPU support. " - "The installed version of bitsandbytes was compiled without GPU support. " + "8-bit optimizers and GPU quantization are unavailable.") - "8-bit optimizers and GPU quantization are unavailable." - ) COMPILED_WITH_CUDA = False
bitsandbytes.cuda_setup.main/get_compute_capability
Modified
bitsandbytes-foundation~bitsandbytes
8d87c0b85214c07756b5dcdb09ceb26b0bb1cb7a
Fixed CUDA setup bugs, including #81.
<5>:<add> if cuda is None: return None <add> <add> # TODO: handle different compute capabilities; for now, take the max <6>:<del> if ccs: <7>:<del> # TODO: handle different compute capabilities; for now, take the max <8>:<add> if ccs: return ccs[-1] <del> return ccs[-1] <9>:<del> return None
# module: bitsandbytes.cuda_setup.main # def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error def get_compute_capability(cuda): <0> """ <1> Extracts the highest compute capbility from all available GPUs, as compute <2> capabilities are downwards compatible. If no GPUs are detected, it returns <3> None. <4> """ <5> ccs = get_compute_capabilities(cuda) <6> if ccs: <7> # TODO: handle different compute capabilities; for now, take the max <8> return ccs[-1] <9> return None <10>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main + # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION def get_cuda_version(cuda, cudart_path): + if cuda is None: return None + - # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION try: cudart = ctypes.CDLL(cudart_path) except OSError: CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') return None version = ctypes.c_int() check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) version = int(version.value) major = version//1000 minor = (version-(major*1000))//10 if major < 11: CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') return f'{major}{minor}' ===========changed ref 1=========== # module: bitsandbytes.cextension lib = CUDASetup.get_instance().lib try: if lib is None and torch.cuda.is_available(): CUDASetup.get_instance().generate_instructions() CUDASetup.get_instance().print_log_stack() raise RuntimeError(''' + CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs aboveto fix your environment! - CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs to fix your environment! If you cannot find any issues and suspect a bug, please open an issue with detals about your environment: https://github.com/TimDettmers/bitsandbytes/issues''') lib.cadam32bit_g32 lib.get_context.restype = ct.c_void_p lib.get_cusparse.restype = ct.c_void_p COMPILED_WITH_CUDA = True except AttributeError: - warn( + warn("The installed version of bitsandbytes was compiled without GPU support. " - "The installed version of bitsandbytes was compiled without GPU support. " + "8-bit optimizers and GPU quantization are unavailable.") - "8-bit optimizers and GPU quantization are unavailable." - ) COMPILED_WITH_CUDA = False
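get_compute_capability() reduces a per-GPU list of compute capabilities to the highest one, since capabilities are backwards compatible. A hedged sketch of how such a list can be collected through the CUDA driver API with ctypes; this mirrors the intent of get_compute_capabilities(), not its exact code, and omits error checking:

import ctypes as ct

def highest_compute_capability():
    cuda = ct.CDLL("libcuda.so")
    cuda.cuInit(0)
    count = ct.c_int()
    cuda.cuDeviceGetCount(ct.byref(count))
    ccs = []
    for ordinal in range(count.value):
        device = ct.c_int()
        major, minor = ct.c_int(), ct.c_int()
        cuda.cuDeviceGet(ct.byref(device), ordinal)
        cuda.cuDeviceComputeCapability(ct.byref(major), ct.byref(minor), device)
        ccs.append(f"{major.value}.{minor.value}")
    # Keep the largest capability; string sort is fine for single-digit majors.
    return sorted(ccs)[-1] if ccs else None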
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
Modified
bitsandbytes-foundation~bitsandbytes
8d87c0b85214c07756b5dcdb09ceb26b0bb1cb7a
Fixed CUDA setup bugs, including #81.
<6>:<del> #if not torch.cuda.is_available(): <7>:<del> #print('No GPU detected. Loading CPU library...') <8>:<del> #return binary_name <9>:<del> <10>:<del> binary_name = "libbitsandbytes_cpu.so" <11>:<add> if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None <14>:<del> if cudart_path is None: <15>:<del> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True) <16>:<del> return binary_name <17>:<del> <18>:<del> cuda_setup.add_log_entry((f"CUDA SETUP: CUDA runtime path found: {cudart_path}")) <21>:<del> cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}") <24>:<add> failure = False <add> if cudart_path is None: <add> failure = True <add> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True) <add> else: <add> cuda_setup.add_log_entry((f"CUDA SETUP: CUDA runtime path found: {cudart_path}")) <26>:<add> failure = True <add> cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library...", is_warning=True) <del> cuda_setup.add_log_entry
# module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): <0> # we remove this for now and see how things go <1> #print('') <2> #print('='*35 + 'BUG REPORT' + '='*35) <3> #print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') <4> #print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') <5> #print('='*80) <6> #if not torch.cuda.is_available(): <7> #print('No GPU detected. Loading CPU library...') <8> #return binary_name <9> <10> binary_name = "libbitsandbytes_cpu.so" <11> <12> cuda_setup = CUDASetup.get_instance() <13> cudart_path = determine_cuda_runtime_lib_path() <14> if cudart_path is None: <15> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True) <16> return binary_name <17> <18> cuda_setup.add_log_entry((f"CUDA SETUP: CUDA runtime path found: {cudart_path}")) <19> cuda = get_cuda_lib_handle() <20> cc = get_compute_capability(cuda) <21> cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}") <22> cuda_version_string = get_cuda_version(cuda, cudart_path) <23> <24> <25> if cc == '' or cc is None: <26> cuda_setup.add_log_entry</s>
===========below chunk 0=========== # module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): # offset: 1 return binary_name, cudart_path, cuda, cc, cuda_version_string # 7.5 is the minimum CC vor cublaslt has_cublaslt = cc in ["7.5", "8.0", "8.6"] # TODO: # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible) # (2) Multiple CUDA versions installed # we use ls -l instead of nvcc to determine the cuda version # since most installations will have the libcudart.so installed, but not the compiler cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}') def get_binary_name(): "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so" bin_base_name = "libbitsandbytes_cuda" if has_cublaslt: return f"{bin_base_name}{cuda_version_string}.so" else: return f"{bin_base_name}{cuda_version_string}_nocublaslt.so" binary_name = get_binary_name() return binary_name, cudart_path, cuda, cc, cuda_version_string ===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main # def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error def get_compute_capability(cuda): """ Extracts the highest compute capbility from all available GPUs, as compute capabilities are downwards compatible. If no GPUs are detected, it returns None. """ + if cuda is None: return None + + # TODO: handle different compute capabilities; for now, take the max ccs = get_compute_capabilities(cuda) - if ccs: - # TODO: handle different compute capabilities; for now, take the max + if ccs: return ccs[-1] - return ccs[-1] - return None ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION def get_cuda_version(cuda, cudart_path): + if cuda is None: return None + - # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION try: cudart = ctypes.CDLL(cudart_path) except OSError: CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') return None version = ctypes.c_int() check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) version = int(version.value) major = version//1000 minor = (version-(major*1000))//10 if major < 11: CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') return f'{major}{minor}' ===========changed ref 2=========== # module: bitsandbytes.cextension lib = CUDASetup.get_instance().lib try: if lib is None and torch.cuda.is_available(): CUDASetup.get_instance().generate_instructions() CUDASetup.get_instance().print_log_stack() raise RuntimeError(''' + CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs aboveto fix your environment! - CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs to fix your environment! If you cannot find any issues and suspect a bug, please open an issue with detals about your environment: https://github.com/TimDettmers/bitsandbytes/issues''') lib.cadam32bit_g32 lib.get_context.restype = ct.c_void_p lib.get_cusparse.restype = ct.c_void_p COMPILED_WITH_CUDA = True except AttributeError: - warn( + warn("The installed version of bitsandbytes was compiled without GPU support. " - "The installed version of bitsandbytes was compiled without GPU support. 
" + "8-bit optimizers and GPU quantization are unavailable.") - "8-bit optimizers and GPU quantization are unavailable." - ) COMPILED_WITH_CUDA = False
bitsandbytes.nn.modules/StableEmbedding.__init__
Modified
bitsandbytes-foundation~bitsandbytes
62d39a237cfcfb664c3165271d2105cfbb480451
add device and dtype parameters to StableEmbedding
<9>:<add> device, <add> dtype, <10>:<add> self.norm = torch.nn.LayerNorm(embedding_dim, device=device) <del> self.norm = torch.nn.LayerNorm(embedding_dim)
<s>.nn.Embedding): def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, _weight: Optional[Tensor] = None, + device=None, + dtype=None, ) -> None: <0> super(StableEmbedding, self).__init__( <1> num_embeddings, <2> embedding_dim, <3> padding_idx, <4> max_norm, <5> norm_type, <6> scale_grad_by_freq, <7> sparse, <8> _weight, <9> ) <10> self.norm = torch.nn.LayerNorm(embedding_dim) <11> GlobalOptimManager.get_instance().register_module_override( <12> self, "weight", {"optim_bits": 32} <13> ) <14>
bitsandbytes.nn.modules/StableEmbedding.forward
Modified
bitsandbytes-foundation~bitsandbytes
62d39a237cfcfb664c3165271d2105cfbb480451
add device and dtype parameters to StableEmbedding
<10>:<add> # always apply layer norm in full precision <add> emb = emb.to(torch.get_default_dtype()) <del> return self.norm(emb)
# module: bitsandbytes.nn.modules class StableEmbedding(torch.nn.Embedding): def forward(self, input: Tensor) -> Tensor: <0> emb = F.embedding( <1> input, <2> self.weight, <3> self.padding_idx, <4> self.max_norm, <5> self.norm_type, <6> self.scale_grad_by_freq, <7> self.sparse, <8> ) <9> <10> return self.norm(emb) <11>
===========changed ref 0=========== <s>.nn.Embedding): def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, _weight: Optional[Tensor] = None, + device=None, + dtype=None, ) -> None: super(StableEmbedding, self).__init__( num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, _weight, + device, + dtype, ) + self.norm = torch.nn.LayerNorm(embedding_dim, device=device) - self.norm = torch.nn.LayerNorm(embedding_dim) GlobalOptimManager.get_instance().register_module_override( self, "weight", {"optim_bits": 32} )
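The forward() change casts the embedding output to the default (full-precision) dtype before the LayerNorm, so the normalization statistics are not computed in fp16. A minimal sketch of the same idea with plain torch modules (not the StableEmbedding class itself):

import torch
import torch.nn.functional as F

weight = torch.randn(100, 64, dtype=torch.float16)  # fp16 embedding table
norm = torch.nn.LayerNorm(64)                       # fp32 parameters by default
ids = torch.randint(0, 100, (4, 8))

emb = F.embedding(ids, weight)            # fp16 lookups
emb = emb.to(torch.get_default_dtype())   # cast up before normalizing
out = norm(emb)
print(out.dtype)                          # torch.float32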
bitsandbytes.cuda_setup.main/check_cuda_result
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<2>:<add> error_str = ct.c_char_p() <del> error_str = ctypes.c_char_p() <3>:<add> cuda.cuGetErrorString(result_val, ct.byref(error_str)) <del> cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
# module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): <0> # 3. Check for CUDA errors <1> if result_val != 0: <2> error_str = ctypes.c_char_p() <3> cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) <4> CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") <5>
bitsandbytes.cuda_setup.main/get_cuda_version
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<3>:<add> cudart = ct.CDLL(cudart_path) <del> cudart = ctypes.CDLL(cudart_path) <8>:<add> version = ct.c_int() <del> version = ctypes.c_int() <9>:<add> check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version))) <del> check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) <15>:<add> CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') <del> CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!')
# module: bitsandbytes.cuda_setup.main # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION def get_cuda_version(cuda, cudart_path): <0> if cuda is None: return None <1> <2> try: <3> cudart = ctypes.CDLL(cudart_path) <4> except OSError: <5> CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') <6> return None <7> <8> version = ctypes.c_int() <9> check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) <10> version = int(version.value) <11> major = version//1000 <12> minor = (version-(major*1000))//10 <13> <14> if major < 11: <15> CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') <16> <17> return f'{major}{minor}' <18>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): # 3. Check for CUDA errors if result_val != 0: + error_str = ct.c_char_p() - error_str = ctypes.c_char_p() + cuda.cuGetErrorString(result_val, ct.byref(error_str)) - cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
bitsandbytes.cuda_setup.main/get_cuda_lib_handle
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<2>:<add> cuda = ct.CDLL("libcuda.so") <del> cuda = ctypes.CDLL("libcuda.so")
# module: bitsandbytes.cuda_setup.main def get_cuda_lib_handle(): <0> # 1. find libcuda.so library (GPU driver) (/usr/lib) <1> try: <2> cuda = ctypes.CDLL("libcuda.so") <3> except OSError: <4> CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') <5> return None <6> check_cuda_result(cuda, cuda.cuInit(0)) <7> <8> return cuda <9>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): # 3. Check for CUDA errors if result_val != 0: + error_str = ct.c_char_p() - error_str = ctypes.c_char_p() + cuda.cuGetErrorString(result_val, ct.byref(error_str)) - cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION def get_cuda_version(cuda, cudart_path): if cuda is None: return None try: + cudart = ct.CDLL(cudart_path) - cudart = ctypes.CDLL(cudart_path) except OSError: CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') return None + version = ct.c_int() - version = ctypes.c_int() + check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version))) - check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) version = int(version.value) major = version//1000 minor = (version-(major*1000))//10 if major < 11: + CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') - CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') return f'{major}{minor}' ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def generate_instructions(self): + if self.cuda is None: + self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.') + self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. 
You can update them via: sudo ldconfig.') + self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:') + self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null') + self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a') + self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc') + return + + if self.cudart_path is None: + self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.') + self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable') + self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null') + self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a') + self.add_log_entry('</s> ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def generate_instructions(self): # offset: 1 <s>_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a') + self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc') + self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.') + self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh') + self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.') + self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local') + return + + make_cmd = f'CUDA_VERSION={self.cuda_version_string}' + if len(self.cuda_version_string) < 3: + make_cmd += ' make cuda92' + elif self.cuda_version_string == '110': + make_cmd += ' make cuda110' + elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0: + make_cmd += ' make cuda11x' + + has_cublaslt = is_cublasLt_compatible(self.cc) + if not has_cublaslt: + make_cmd += '_nomatmul' + </s> ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def generate_instructions(self): # offset: 2 <s> self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:') + self.add_log_entry('git clone [email protected]:TimDettmers/bitsandbytes.git') + self.add_log_entry('cd bitsandbytes') + self.add_log_entry(make_cmd) + self.add_log_entry('python setup.py install') +
bitsandbytes.cuda_setup.main/get_compute_capabilities
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<10>:<add> nGpus = ct.c_int() <del> nGpus = ctypes.c_int() <11>:<add> cc_major = ct.c_int() <del> cc_major = ctypes.c_int() <12>:<add> cc_minor = ct.c_int() <del> cc_minor = ctypes.c_int() <14>:<add> device = ct.c_int() <del> device = ctypes.c_int() <16>:<add> check_cuda_result(cuda, cuda.cuDeviceGetCount(ct.byref(nGpus))) <del> check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus))) <19>:<add> check_cuda_result(cuda, cuda.cuDeviceGet(ct.byref(device), i)) <del> check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i)) <20>:<add> ref_major = ct.byref(cc_major) <del> ref_major = ctypes.byref(cc_major) <21>:<add> ref_minor = ct.byref(cc_minor) <del> ref_minor = ctypes.byref(cc_minor)
# module: bitsandbytes.cuda_setup.main def get_compute_capabilities(cuda): <0> """ <1> 1. find libcuda.so library (GPU driver) (/usr/lib) <2> init_device -> init variables -> call function by reference <3> 2. call extern C function to determine CC <4> (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html) <5> 3. Check for CUDA errors <6> https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api <7> # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549 <8> """ <9> <10> nGpus = ctypes.c_int() <11> cc_major = ctypes.c_int() <12> cc_minor = ctypes.c_int() <13> <14> device = ctypes.c_int() <15> <16> check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus))) <17> ccs = [] <18> for i in range(nGpus.value): <19> check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i)) <20> ref_major = ctypes.byref(cc_major) <21> ref_minor = ctypes.byref(cc_minor) <22> # 2. call extern C function to determine CC <23> check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device)) <24> ccs.append(f"{cc_major.value}.{cc_minor.value}") <25> <26> return ccs <27>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main def get_cuda_lib_handle(): # 1. find libcuda.so library (GPU driver) (/usr/lib) try: + cuda = ct.CDLL("libcuda.so") - cuda = ctypes.CDLL("libcuda.so") except OSError: CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') return None check_cuda_result(cuda, cuda.cuInit(0)) return cuda ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): # 3. Check for CUDA errors if result_val != 0: + error_str = ct.c_char_p() - error_str = ctypes.c_char_p() + cuda.cuGetErrorString(result_val, ct.byref(error_str)) - cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION def get_cuda_version(cuda, cudart_path): if cuda is None: return None try: + cudart = ct.CDLL(cudart_path) - cudart = ctypes.CDLL(cudart_path) except OSError: CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') return None + version = ct.c_int() - version = ctypes.c_int() + check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version))) - check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) version = int(version.value) major = version//1000 minor = (version-(major*1000))//10 if major < 11: + CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') - CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') return f'{major}{minor}' ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def generate_instructions(self): + if self.cuda is None: + self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.') + self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. 
You can update them via: sudo ldconfig.') + self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:') + self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null') + self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a') + self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc') + return + + if self.cudart_path is None: + self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.') + self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable') + self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null') + self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a') + self.add_log_entry('</s> ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def generate_instructions(self): # offset: 1 <s>_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a') + self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc') + self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.') + self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh') + self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.') + self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local') + return + + make_cmd = f'CUDA_VERSION={self.cuda_version_string}' + if len(self.cuda_version_string) < 3: + make_cmd += ' make cuda92' + elif self.cuda_version_string == '110': + make_cmd += ' make cuda110' + elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0: + make_cmd += ' make cuda11x' + + has_cublaslt = is_cublasLt_compatible(self.cc) + if not has_cublaslt: + make_cmd += '_nomatmul' + </s> ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def generate_instructions(self): # offset: 2 <s> self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:') + self.add_log_entry('git clone [email protected]:TimDettmers/bitsandbytes.git') + self.add_log_entry('cd bitsandbytes') + self.add_log_entry(make_cmd) + self.add_log_entry('python setup.py install') +
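The ctypes loop above collects one "major.minor" string per device. The same information is available through PyTorch's wrapper, which is a convenient way to sanity-check the driver query; this sketch only assumes a visible CUDA device and is not part of the library code.

import torch

if torch.cuda.is_available():
    ccs = [
        "{}.{}".format(*torch.cuda.get_device_capability(i))
        for i in range(torch.cuda.device_count())
    ]
    print(ccs)  # e.g. ['8.0'] on an A100; 7.5 or higher is what the cuBLASLt path needs
else:
    print("no CUDA device visible")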
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0': <del> # we remove this for now and see how things go <1>:<add> print('') <del> #print('') <2>:<add> print('='*35 + 'BUG REPORT' + '='*35) <del> #print('='*35 + 'BUG REPORT' + '='*35) <3>:<add> print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') <del> #print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') <4>:<add> print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') <del> #print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') <5>:<add> print('='*80) <del> #print('='*80) <19>:<add> cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart
# module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): <0> # we remove this for now and see how things go <1> #print('') <2> #print('='*35 + 'BUG REPORT' + '='*35) <3> #print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') <4> #print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') <5> #print('='*80) <6> if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None <7> <8> cuda_setup = CUDASetup.get_instance() <9> cudart_path = determine_cuda_runtime_lib_path() <10> cuda = get_cuda_lib_handle() <11> cc = get_compute_capability(cuda) <12> cuda_version_string = get_cuda_version(cuda, cudart_path) <13> <14> failure = False <15> if cudart_path is None: <16> failure = True <17> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True) <18> else: <19> cuda_setup.add_log_entry((f"CUDA SETUP: CUDA runtime path found: {cudart_path}")) <20> <21> if cc == '' or cc is None: <22> failure = True <23> cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library...", is_warning=True) <24> else: <25> cuda_setup.add_log</s>
===========below chunk 0=========== # module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): # offset: 1 if cuda is None: failure = True else: cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}') # 7.5 is the minimum CC vor cublaslt has_cublaslt = cc in ["7.5", "8.0", "8.6"] # TODO: # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible) # (2) Multiple CUDA versions installed # we use ls -l instead of nvcc to determine the cuda version # since most installations will have the libcudart.so installed, but not the compiler if failure: binary_name = "libbitsandbytes_cpu.so" elif has_cublaslt: binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so" else: "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so" binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so" return binary_name, cudart_path, cuda, cc, cuda_version_string ===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main def get_cuda_lib_handle(): # 1. find libcuda.so library (GPU driver) (/usr/lib) try: + cuda = ct.CDLL("libcuda.so") - cuda = ctypes.CDLL("libcuda.so") except OSError: CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') return None check_cuda_result(cuda, cuda.cuInit(0)) return cuda ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): # 3. Check for CUDA errors if result_val != 0: + error_str = ct.c_char_p() - error_str = ctypes.c_char_p() + cuda.cuGetErrorString(result_val, ct.byref(error_str)) - cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION def get_cuda_version(cuda, cudart_path): if cuda is None: return None try: + cudart = ct.CDLL(cudart_path) - cudart = ctypes.CDLL(cudart_path) except OSError: CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') return None + version = ct.c_int() - version = ctypes.c_int() + check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version))) - check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) version = int(version.value) major = version//1000 minor = (version-(major*1000))//10 if major < 11: + CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') - CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). 
You will be only to use 8-bit optimizers and quantization routines!!') return f'{major}{minor}' ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main def get_compute_capabilities(cuda): """ 1. find libcuda.so library (GPU driver) (/usr/lib) init_device -> init variables -> call function by reference 2. call extern C function to determine CC (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html) 3. Check for CUDA errors https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549 """ + nGpus = ct.c_int() - nGpus = ctypes.c_int() + cc_major = ct.c_int() - cc_major = ctypes.c_int() + cc_minor = ct.c_int() - cc_minor = ctypes.c_int() + device = ct.c_int() - device = ctypes.c_int() + check_cuda_result(cuda, cuda.cuDeviceGetCount(ct.byref(nGpus))) - check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus))) ccs = [] for i in range(nGpus.value): + check_cuda_result(cuda, cuda.cuDeviceGet(ct.byref(device), i)) - check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i)) + ref_major = ct.byref(cc_major) - ref_major = ctypes.byref(cc_major) + ref_minor = ct.byref(cc_minor) - ref_minor = ctypes.byref(cc_minor) # 2. call extern C</s> ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main def get_compute_capabilities(cuda): # offset: 1 <s>ref(cc_minor) - ref_minor = ctypes.byref(cc_minor) # 2. call extern C function to determine CC check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device)) ccs.append(f"{cc_major.value}.{cc_minor.value}") return ccs
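The tail of evaluate_cuda_setup (see "below chunk 0" above) reduces to a three-way choice of shared-object name. A plain-Python restatement of that decision, with the inputs treated as already-computed values and the function name chosen only for illustration:

def pick_binary_name(failure, has_cublaslt, cuda_version_string):
    if failure:                      # no libcudart, no GPU, or no driver handle
        return "libbitsandbytes_cpu.so"
    if has_cublaslt:                 # compute capability 7.5, 8.0 or 8.6
        return f"libbitsandbytes_cuda{cuda_version_string}.so"
    return f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"

print(pick_binary_name(False, True, "117"))   # libbitsandbytes_cuda117.so
print(pick_binary_name(False, False, "102"))  # libbitsandbytes_cuda102_nocublaslt.so
print(pick_binary_name(True, False, None))    # libbitsandbytes_cpu.so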
bitsandbytes.autograd._functions/MatMul8bit.forward
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> if precision is None: <add> precision = [8, 8, 8]
# module: bitsandbytes.autograd._functions class MatMul8bit(torch.autograd.Function): @staticmethod + def forward(ctx, A, B, out=None, quant_type="vector", precision=None): - def forward(ctx, A, B, out=None, quant_type="vector", precision=[8, 8, 8]): <0> if precision[0] != 8: <1> with torch.no_grad(): <2> output = torch.matmul(A, B) <3> else: <4> if len(B.shape) == 2: <5> dim = 0 <6> else: <7> dim = 1 <8> qA, SA = F.vectorwise_quant(A, dim=-1, quant_type=quant_type) <9> qB, SB = F.vectorwise_quant(B, dim=dim, quant_type=quant_type) <10> iout = F.igemm(qA, qB) <11> output = F.vectorwise_mm_dequant(iout, SA, SB, A.dtype, quant_type) <12> <13> if A.requires_grad or B.requires_grad: <14> ctx.save_for_backward(A, B) <15> <16> ctx.quant_type = quant_type <17> ctx.precision = precision <18> <19> return output <20>
===========changed ref 0=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def is_cublasLt_compatible(cc): + has_cublaslt = False + if cc is not None: + cc_major, cc_minor = cc.split('.') + if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True) + else: + has_cublaslt = True + return has_cublaslt + ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): # 3. Check for CUDA errors if result_val != 0: + error_str = ct.c_char_p() - error_str = ctypes.c_char_p() + cuda.cuGetErrorString(result_val, ct.byref(error_str)) - cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) CUDASetup.get_instance().add_log_entry(f"CUDA exception! 
Error code: {error_str.value.decode()}") ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main def get_cuda_lib_handle(): # 1. find libcuda.so library (GPU driver) (/usr/lib) try: + cuda = ct.CDLL("libcuda.so") - cuda = ctypes.CDLL("libcuda.so") except OSError: CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') return None check_cuda_result(cuda, cuda.cuInit(0)) return cuda ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]: + existent_directories: Set[Path] = set() + for path in candidate_paths: + try: + if path.exists(): + existent_directories.add(path) + except OSError as exc: + if exc.errno != errno.ENAMETOOLONG: + raise exc + + non_existent_directories: Set[Path] = candidate_paths - existent_directories + if non_existent_directories: + CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to " + f"be non-existent: {non_existent_directories}", is_warning=True) + + return existent_directories + ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None: + if len(results_paths) > 1: + warning_msg = ( + f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. " + "We'll flip a coin and try one of these, in order to fail forward.\n" + "Either way, this might cause trouble in the future:\n" + "If you get `CUDA error: invalid device function` errors, the above " + "might be the cause and the solution is to make sure only one " + f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env.") + CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True) +
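The only functional change in this record is replacing the mutable default precision=[8, 8, 8] with a None sentinel. The hazard being avoided is the usual one: a list default is created once at definition time and shared across calls. A tiny illustration with throwaway functions (not the autograd class itself):

def shared_default(precision=[8, 8, 8]):
    precision[0] -= 1
    return precision

def sentinel_default(precision=None):
    if precision is None:
        precision = [8, 8, 8]
    precision[0] -= 1
    return precision

print(shared_default())    # [7, 8, 8]
print(shared_default())    # [6, 8, 8]  <- state leaked from the first call
print(sentinel_default())  # [7, 8, 8]
print(sentinel_default())  # [7, 8, 8]  <- fresh list every call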
bitsandbytes.functional/create_linear_map
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> sign = (-1.0 if signed else 0.0) <add> total_values = 2**total_bits <add> if add_zero or total_bits < 8: <add> # add a zero <add> # since we simulate less bits by having zeros in the data type, we <add> # we need to center the quantization around zero and as such lose <add> # a single value <add> total_values = (2**total_bits if not signed else 2**total_bits-1) <add> <add> values = torch.linspace(sign, 1.0, total_values) <add> gap = 256 - values.numel() <add> if gap == 0: <add> return values <del> if signed: <1>:<del> return torch.linspace(-1.0, 1.0, 256) <3>:<add> l = values.numel()//2 <add> #return torch.Tensor(values[:l].tolist() + [-1e-6]*((gap//2)-1) + [0]*2 + [1e-6]*((gap//2)-1) + values[l:].tolist()) <add> return torch.Tensor(values[:l].tolist() + [0
# module: bitsandbytes.functional + def create_linear_map(signed=True, total_bits=8, add_zero=True): - def create_linear_map(signed=True): <0> if signed: <1> return torch.linspace(-1.0, 1.0, 256) <2> else: <3> return torch.linspace(0.0, 1.0, 256) <4>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 1=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 11=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 14=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 16=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. 
+ This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + def is_cublasLt_compatible(cc): + has_cublaslt = False + if cc is not None: + cc_major, cc_minor = cc.split('.') + if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True) + else: + has_cublaslt = True + return has_cublaslt + ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): # 3. Check for CUDA errors if result_val != 0: + error_str = ct.c_char_p() - error_str = ctypes.c_char_p() + cuda.cuGetErrorString(result_val, ct.byref(error_str)) - cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") ===========changed ref 19=========== # module: bitsandbytes.cuda_setup.main def get_cuda_lib_handle(): # 1. find libcuda.so library (GPU driver) (/usr/lib) try: + cuda = ct.CDLL("libcuda.so") - cuda = ctypes.CDLL("libcuda.so") except OSError: CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') return None check_cuda_result(cuda, cuda.cuInit(0)) return cuda ===========changed ref 20=========== # module: bitsandbytes.cuda_setup.main + def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]: + existent_directories: Set[Path] = set() + for path in candidate_paths: + try: + if path.exists(): + existent_directories.add(path) + except OSError as exc: + if exc.errno != errno.ENAMETOOLONG: + raise exc + + non_existent_directories: Set[Path] = candidate_paths - existent_directories + if non_existent_directories: + CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to " + f"be non-existent: {non_existent_directories}", is_warning=True) + + return existent_directories +
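The updated create_linear_map pads maps built from fewer than 8 bits out to 256 entries by inserting zeros in the middle, so downstream kernels always see a full lookup table. A standalone sketch that mirrors the '+' lines above; treat it as an approximation of the library code, not a drop-in replacement:

import torch

def linear_map(signed=True, total_bits=8, add_zero=True):
    sign = -1.0 if signed else 0.0
    total_values = 2**total_bits
    if add_zero or total_bits < 8:
        # drop one value for signed maps so the grid is centered on zero
        total_values = 2**total_bits if not signed else 2**total_bits - 1
    values = torch.linspace(sign, 1.0, total_values)
    gap = 256 - values.numel()
    if gap == 0:
        return values
    half = values.numel() // 2
    return torch.tensor(values[:half].tolist() + [0.0] * gap + values[half:].tolist())

code = linear_map(signed=True, total_bits=4)
print(code.numel())                            # 256: padded to a full table
print(code.min().item(), code.max().item())    # -1.0 1.0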
bitsandbytes.functional/create_fp8_map
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<2>:<add> has_sign = 1 if signed else 0 <add> assert e+p == total_bits-has_sign <del> assert e+p == 7 <6>:<add> for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 2**(exponent_bits-has_sign), 1)): <del> for i, val in enumerate(range(-((2**(exponent_bits-1))), 2**(exponent_bits-1), 1)): <10>:<add> values = [] <11>:<add> #for ev in evalues: <add> bias = 2**(exponent_bits-1)-1 <add> for evalue in range(2**(exponent_bits)): <add> for bit_pattern in lst: <del> for bit_pattern in lst: <12>:<add> value = (1 if evalue != 0 else 0) <del> value = 1 <13>:<add> for i, pval in enumerate(list(bit_pattern)): <del> for i, pval in enumerate(list(bit_pattern)): <14>:<add> value += pval*(2**-(i+1)) <del> value += pval*(2**-(i+1)) <15>:<add> if evalue == 0: <add> # subnormals <add> value = value*2**-(bias-1) <add> else: <add> # normals <add> value = value*2**-(evalue-bias-2) <add> values.
# module: bitsandbytes.functional + def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): - def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2): <0> e = exponent_bits <1> p = precision_bits <2> assert e+p == 7 <3> # the exponent is biased to 2^(e-1) -1 == 0 <4> evalues = [] <5> pvalues = [] <6> for i, val in enumerate(range(-((2**(exponent_bits-1))), 2**(exponent_bits-1), 1)): <7> evalues.append(2**val) <8> <9> <10> lst = list(itertools.product([0, 1], repeat=precision_bits)) <11> for bit_pattern in lst: <12> value = 1 <13> for i, pval in enumerate(list(bit_pattern)): <14> value += pval*(2**-(i+1)) <15> pvalues.append(value) <16> <17> assert len(evalues)*len(pvalues) == 128 <18> values = [] <19> for ev in evalues: <20> for pv in pvalues: <21> values.append(-ev*pv) <22> values.append(ev*pv) <23> values.sort() <24> code = torch.Tensor(values) <25> code /= code.max() <26> code[127] = 0 <27> <28> return code <29>
===========changed ref 0=========== # module: bitsandbytes.functional + def create_linear_map(signed=True, total_bits=8, add_zero=True): - def create_linear_map(signed=True): + sign = (-1.0 if signed else 0.0) + total_values = 2**total_bits + if add_zero or total_bits < 8: + # add a zero + # since we simulate less bits by having zeros in the data type, we + # we need to center the quantization around zero and as such lose + # a single value + total_values = (2**total_bits if not signed else 2**total_bits-1) + + values = torch.linspace(sign, 1.0, total_values) + gap = 256 - values.numel() + if gap == 0: + return values - if signed: - return torch.linspace(-1.0, 1.0, 256) else: + l = values.numel()//2 + #return torch.Tensor(values[:l].tolist() + [-1e-6]*((gap//2)-1) + [0]*2 + [1e-6]*((gap//2)-1) + values[l:].tolist()) + return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist()) - return torch.linspace(0.0, 1.0, 256) ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 2=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 12=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: 
+ return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 15=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 17=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def is_cublasLt_compatible(cc): + has_cublaslt = False + if cc is not None: + cc_major, cc_minor = cc.split('.') + if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True) + else: + has_cublaslt = True + return has_cublaslt + ===========changed ref 19=========== # module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): # 3. Check for CUDA errors if result_val != 0: + error_str = ct.c_char_p() - error_str = ctypes.c_char_p() + cuda.cuGetErrorString(result_val, ct.byref(error_str)) - cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") ===========changed ref 20=========== # module: bitsandbytes.cuda_setup.main def get_cuda_lib_handle(): # 1. find libcuda.so library (GPU driver) (/usr/lib) try: + cuda = ct.CDLL("libcuda.so") - cuda = ctypes.CDLL("libcuda.so") except OSError: CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') return None check_cuda_result(cuda, cuda.cuInit(0)) return cuda
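Two small numeric checks of the quantities used in the rewritten create_fp8_map, with values chosen for an E4M3-style layout (4 exponent bits, 3 mantissa bits, 1 sign bit); these are sample values, not taken from a specific call site:

exponent_bits, precision_bits, signed = 4, 3, True
has_sign = 1 if signed else 0
assert exponent_bits + precision_bits == 8 - has_sign  # e + p == total_bits - has_sign

bias = 2**(exponent_bits - 1) - 1
print(bias)  # 7: the bias that separates the subnormal branch (evalue == 0) from normals

# mantissa reconstruction for a normal value, as in the inner bit_pattern loop above
bit_pattern = (1, 0, 1)
mantissa = 1 + sum(b * 2**-(i + 1) for i, b in enumerate(bit_pattern))
print(mantissa)  # 1.625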
bitsandbytes.functional/create_dynamic_map
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<20>:<add> non_sign_bits = total_bits - (1 if signed else 0) <add> additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1 <del> additional_items = 2 ** (7 - n) - 1 <23>:<del> for i in range(n): <24>:<del> fraction_items = ( <25>:<del> 2 ** (i + 7 - n) + 1 if signed else 2 ** (i + 7 - n + 1) + 1 <26>:<del> ) <27>:<add> for i in range(max_exponent_bits): <add> fraction_items = int((2 ** (i + non_sign_bits - max_exponent_bits) + 1 if signed else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1)) <29>:<add> data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist() <del> data += ((10 ** (-(n - 1) + i)) * means).tolist() <31>:<add> data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist() <del> data += (-(10 ** (-(n - 1) + i)) * means).tolist() <33>:<add> if additional_items > 0: <del> if additional_items > 0: <34>:<add> boundaries = torch.linspace(0.1, 1, additional_items + 1) <del> boundaries = torch.linspace(0.1, 1, additional_items + 1) <35>:<add> means = (boundaries[:-1] + boundaries
# module: bitsandbytes.functional + def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8): - def create_dynamic_map(signed=True, n=7): <0> """ <1> Creates the dynamic quantiztion map. <2> <3> The dynamic data type is made up of a dynamic exponent and <4> fraction. As the exponent increase from 0 to -7 the number <5> of bits available for the fraction shrinks. <6> <7> This is a generalization of the dynamic type where a certain <8> number of the bits and be reserved for the linear quantization <9> region (the fraction). n determines the maximum number of <10> exponent bits. <11> <12> For more details see <13> (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561] <14> """ <15> <16> data = [] <17> # these are additional items that come from the case <18> # where all the exponent bits are zero and no <19> # indicator bit is present <20> additional_items = 2 ** (7 - n) - 1 <21> if not signed: <22> additional_items = 2 * additional_items <23> for i in range(n): <24> fraction_items = ( <25> 2 ** (i + 7 - n) + 1 if signed else 2 ** (i + 7 - n + 1) + 1 <26> ) <27> boundaries = torch.linspace(0.1, 1, fraction_items) <28> means = (boundaries[:-1] + boundaries[1:]) / 2.0 <29> data += ((10 ** (-(n - 1) + i)) * means).tolist() <30> if signed: <31> data += (-(10 ** (-(n - 1) + i)) * means).tolist() <32> <33> if additional_items > 0: <34> boundaries = torch.linspace(0.1, 1, additional_items + 1) <35> means = (boundaries[:-1] + boundaries[1:]) / 2.0 <36> data += ((10 ** (-(n - 1) + i))</s>
===========below chunk 0=========== # module: bitsandbytes.functional + def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8): - def create_dynamic_map(signed=True, n=7): # offset: 1 if signed: data += (-(10 ** (-(n - 1) + i)) * means).tolist() data.append(0) data.append(1.0) data.sort() return Tensor(data) ===========changed ref 0=========== # module: bitsandbytes.functional + def create_linear_map(signed=True, total_bits=8, add_zero=True): - def create_linear_map(signed=True): + sign = (-1.0 if signed else 0.0) + total_values = 2**total_bits + if add_zero or total_bits < 8: + # add a zero + # since we simulate less bits by having zeros in the data type, we + # we need to center the quantization around zero and as such lose + # a single value + total_values = (2**total_bits if not signed else 2**total_bits-1) + + values = torch.linspace(sign, 1.0, total_values) + gap = 256 - values.numel() + if gap == 0: + return values - if signed: - return torch.linspace(-1.0, 1.0, 256) else: + l = values.numel()//2 + #return torch.Tensor(values[:l].tolist() + [-1e-6]*((gap//2)-1) + [0]*2 + [1e-6]*((gap//2)-1) + values[l:].tolist()) + return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist()) - return torch.linspace(0.0, 1.0, 256) ===========changed ref 1=========== # module: bitsandbytes.functional + def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): - def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2): e = exponent_bits p = precision_bits + has_sign = 1 if signed else 0 + assert e+p == total_bits-has_sign - assert e+p == 7 # the exponent is biased to 2^(e-1) -1 == 0 evalues = [] pvalues = [] + for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 2**(exponent_bits-has_sign), 1)): - for i, val in enumerate(range(-((2**(exponent_bits-1))), 2**(exponent_bits-1), 1)): evalues.append(2**val) + values = [] lst = list(itertools.product([0, 1], repeat=precision_bits)) + #for ev in evalues: + bias = 2**(exponent_bits-1)-1 + for evalue in range(2**(exponent_bits)): + for bit_pattern in lst: - for bit_pattern in lst: + value = (1 if evalue != 0 else 0) - value = 1 + for i, pval in enumerate(list(bit_pattern)): - for i, pval in enumerate(list(bit_pattern)): + value += pval*(2**-(i+1)) - value += pval*(2**-(i+1)) + if evalue == 0: + # subnormals + value = value*2**-(bias-1) + else: + # normals + value = value*2**-(evalue-bias-2) + values.append(value) - pvalues.append(value) + if signed: + values.append(-value) + + assert len(</s> ===========changed ref 2=========== # module: bitsandbytes.functional + def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): - def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2): # offset: 1 <s>values.append(value) + if signed: + values.append(-value) + + assert len(values) == 2**total_bits + values.sort() + if total_bits < 8: + gap = 256 - len(values) + for i in range(gap): - assert len(evalues)*len(pvalues) == 128 - values = [] - for ev in evalues: - for pv in pvalues: - values.append(-ev*pv) + values.append(0) - values.append(ev*pv) values.sort() code = torch.Tensor(values) code /= code.max() - code[127] = 0 return code ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def 
__init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 14=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) +
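The generalized create_dynamic_map sizes its extra linear-region bins from the split between sign, exponent and fraction bits. A quick check of that arithmetic for an 8-bit signed map with 5 exponent bits (sample parameters, not from a specific call site):

total_bits, max_exponent_bits, signed = 8, 5, True
non_sign_bits = total_bits - (1 if signed else 0)
additional_items = 2**(non_sign_bits - max_exponent_bits) - 1
print(non_sign_bits)     # 7
print(additional_items)  # 3 extra linear-region items when only 5 of the 7 bits are exponent bits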
bitsandbytes.functional/get_special_format_str
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> major, _minor = torch.cuda.get_device_capability() <del> major, minor = torch.cuda.get_device_capability() <4>:<add> if major == 8: <del> elif major == 8: <6>:<del> else: <7>:<add> return "col_turing" <del> return "col_turing"
# module: bitsandbytes.functional def get_special_format_str(): <0> if not torch.cuda.is_available(): return 'col_turing' <1> major, minor = torch.cuda.get_device_capability() <2> if major <= 7: <3> return "col_turing" <4> elif major == 8: <5> return "col_ampere" <6> else: <7> return "col_turing" <8>
===========changed ref 0=========== # module: bitsandbytes.functional + def create_linear_map(signed=True, total_bits=8, add_zero=True): - def create_linear_map(signed=True): + sign = (-1.0 if signed else 0.0) + total_values = 2**total_bits + if add_zero or total_bits < 8: + # add a zero + # since we simulate less bits by having zeros in the data type, we + # we need to center the quantization around zero and as such lose + # a single value + total_values = (2**total_bits if not signed else 2**total_bits-1) + + values = torch.linspace(sign, 1.0, total_values) + gap = 256 - values.numel() + if gap == 0: + return values - if signed: - return torch.linspace(-1.0, 1.0, 256) else: + l = values.numel()//2 + #return torch.Tensor(values[:l].tolist() + [-1e-6]*((gap//2)-1) + [0]*2 + [1e-6]*((gap//2)-1) + values[l:].tolist()) + return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist()) - return torch.linspace(0.0, 1.0, 256) ===========changed ref 1=========== # module: bitsandbytes.functional + def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): - def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2): e = exponent_bits p = precision_bits + has_sign = 1 if signed else 0 + assert e+p == total_bits-has_sign - assert e+p == 7 # the exponent is biased to 2^(e-1) -1 == 0 evalues = [] pvalues = [] + for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 2**(exponent_bits-has_sign), 1)): - for i, val in enumerate(range(-((2**(exponent_bits-1))), 2**(exponent_bits-1), 1)): evalues.append(2**val) + values = [] lst = list(itertools.product([0, 1], repeat=precision_bits)) + #for ev in evalues: + bias = 2**(exponent_bits-1)-1 + for evalue in range(2**(exponent_bits)): + for bit_pattern in lst: - for bit_pattern in lst: + value = (1 if evalue != 0 else 0) - value = 1 + for i, pval in enumerate(list(bit_pattern)): - for i, pval in enumerate(list(bit_pattern)): + value += pval*(2**-(i+1)) - value += pval*(2**-(i+1)) + if evalue == 0: + # subnormals + value = value*2**-(bias-1) + else: + # normals + value = value*2**-(evalue-bias-2) + values.append(value) - pvalues.append(value) + if signed: + values.append(-value) + + assert len(</s> ===========changed ref 2=========== # module: bitsandbytes.functional + def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): - def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2): # offset: 1 <s>values.append(value) + if signed: + values.append(-value) + + assert len(values) == 2**total_bits + values.sort() + if total_bits < 8: + gap = 256 - len(values) + for i in range(gap): - assert len(evalues)*len(pvalues) == 128 - values = [] - for ev in evalues: - for pv in pvalues: - values.append(-ev*pv) + values.append(0) - values.append(ev*pv) values.sort() code = torch.Tensor(values) code /= code.max() - code[127] = 0 return code ===========changed ref 3=========== # module: bitsandbytes.functional + def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8): - def create_dynamic_map(signed=True, n=7): """ Creates the dynamic quantiztion map. The dynamic data type is made up of a dynamic exponent and fraction. As the exponent increase from 0 to -7 the number of bits available for the fraction shrinks. This is a generalization of the dynamic type where a certain number of the bits and be reserved for the linear quantization region (the fraction). n determines the maximum number of exponent bits. 
For more details see (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561] """ data = [] # these are additional items that come from the case # where all the exponent bits are zero and no # indicator bit is present + non_sign_bits = total_bits - (1 if signed else 0) + additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1 - additional_items = 2 ** (7 - n) - 1 if not signed: additional_items = 2 * additional_items - for i in range(n): - fraction_items = ( - 2 ** (i + 7 - n) + 1 if signed else 2 ** (i + 7 - n + 1) + 1 - ) + for i in range(max_exponent_bits): + fraction_items = int((2 ** (i + non_sign_bits - max_exponent_bits) + 1 if signed else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1)) boundaries = torch.linspace(0.1, 1, fraction_items) means = (boundaries[:-1] + boundaries[1:]) / 2.0 + data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist() - data += ((10 ** (-(n -</s>
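The cleaned-up get_special_format_str is a small compute-capability dispatch. Restated on its own, mirroring the '+' lines of the diff, with the trailing return acting as the catch-all:

def special_format(major):
    if major <= 7:
        return "col_turing"   # Turing / Volta and older
    if major == 8:
        return "col_ampere"
    return "col_turing"       # fallback for anything newer or unknown

print(special_format(7), special_format(8))  # col_turing col_ampere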
bitsandbytes.functional/nvidia_transform
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<26>:<del> ptrA = get_ptr(A) <27>:<del> ptrOut = get_ptr(out)
# module: bitsandbytes.functional def nvidia_transform( A, to_order, from_order="row", out=None, transpose=False, state=None, ld=None, ): <0> if state is None: <1> state = (A.shape, from_order) <2> else: <3> from_order = state[1] <4> if out is None: <5> out, new_state = get_transform_buffer( <6> state[0], A.dtype, A.device, to_order, state[1] <7> ) <8> else: <9> new_state = (state[1], to_order) <10> func = get_transform_func(A.dtype, from_order, to_order, transpose) <11> <12> shape = state[0] <13> if len(shape) == 2: <14> dim1 = ct.c_int32(shape[0]) <15> dim2 = ct.c_int32(shape[1]) <16> elif ld is not None: <17> n = prod(shape) <18> dim1 = prod([shape[i] for i in ld]) <19> dim2 = ct.c_int32(n // dim1) <20> dim1 = ct.c_int32(dim1) <21> else: <22> dim1 = ct.c_int32(shape[0] * shape[1]) <23> dim2 = ct.c_int32(shape[2]) <24> <25> ptr = CUBLAS_Context.get_instance().get_context(A.device) <26> ptrA = get_ptr(A) <27> ptrOut = get_ptr(out) <28> func(ptr, get_ptr(A), get_ptr(out), dim1, dim2) <29> <30> return out, new_state <31>
===========changed ref 0=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + ===========changed ref 1=========== # module: bitsandbytes.functional def get_special_format_str(): if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() - major, minor = torch.cuda.get_device_capability() if major <= 7: return "col_turing" + if major == 8: - elif major == 8: return "col_ampere" - else: + return "col_turing" - return "col_turing" ===========changed ref 2=========== # module: bitsandbytes.functional + def create_linear_map(signed=True, total_bits=8, add_zero=True): - def create_linear_map(signed=True): + sign = (-1.0 if signed else 0.0) + total_values = 2**total_bits + if add_zero or total_bits < 8: + # add a zero + # since we simulate less bits by having zeros in the data type, we + # we need to center the quantization around zero and as such lose + # a single value + total_values = (2**total_bits if not signed else 2**total_bits-1) + + values = torch.linspace(sign, 1.0, total_values) + gap = 256 - values.numel() + if gap == 0: + return values - if signed: - return torch.linspace(-1.0, 1.0, 256) else: + l = values.numel()//2 + #return torch.Tensor(values[:l].tolist() + [-1e-6]*((gap//2)-1) + [0]*2 + [1e-6]*((gap//2)-1) + values[l:].tolist()) + return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist()) - return torch.linspace(0.0, 1.0, 256) ===========changed ref 3=========== # module: bitsandbytes.functional + def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): - def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2): e = exponent_bits p = precision_bits + has_sign = 1 if signed else 0 + assert e+p == total_bits-has_sign - assert e+p == 7 # the exponent is biased to 2^(e-1) -1 == 0 evalues = [] pvalues = [] + for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 2**(exponent_bits-has_sign), 1)): - for i, val in enumerate(range(-((2**(exponent_bits-1))), 2**(exponent_bits-1), 1)): evalues.append(2**val) + values = [] lst = list(itertools.product([0, 1], repeat=precision_bits)) + #for ev in evalues: + bias = 2**(exponent_bits-1)-1 + for evalue in range(2**(exponent_bits)): + for bit_pattern in lst: - for bit_pattern in lst: + value = (1 if evalue != 0 else 0) - value = 1 + for i, pval in enumerate(list(bit_pattern)): - for i, pval in enumerate(list(bit_pattern)): + value += pval*(2**-(i+1)) - value += pval*(2**-(i+1)) + if evalue == 0: + # subnormals + value = value*2**-(bias-1) + else: + # normals + value = value*2**-(evalue-bias-2) + values.append(value) - pvalues.append(value) + if signed: + values.append(-value) + + assert len(</s> ===========changed ref 4=========== # module: bitsandbytes.functional + def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): - def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2): # offset: 1 <s>values.append(value) + if signed: + values.append(-value) + + assert len(values) == 2**total_bits + values.sort() + if total_bits < 8: + gap = 256 - len(values) + for i in range(gap): - assert len(evalues)*len(pvalues) == 128 - values = [] - for ev in evalues: - for pv in pvalues: - values.append(-ev*pv) + values.append(0) - values.append(ev*pv) values.sort() code = 
torch.Tensor(values) code /= code.max() - code[127] = 0 return code ===========changed ref 5=========== # module: bitsandbytes.functional + def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8): - def create_dynamic_map(signed=True, n=7): """ Creates the dynamic quantiztion map. The dynamic data type is made up of a dynamic exponent and fraction. As the exponent increase from 0 to -7 the number of bits available for the fraction shrinks. This is a generalization of the dynamic type where a certain number of the bits and be reserved for the linear quantization region (the fraction). n determines the maximum number of exponent bits. For more details see (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561] """ data = [] # these are additional items that come from the case # where all the exponent bits are zero and no # indicator bit is present + non_sign_bits = total_bits - (1 if signed else 0) + additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1 - additional_items = 2 ** (7 - n) - 1 if not signed: additional_items = 2 * additional_items - for i in range(n): - fraction_items = ( - 2 ** (i + 7 - n) + 1 if signed else 2 ** (i + 7 - n + 1) + 1 - ) + for i in range(max_exponent_bits): + fraction_items = int((2 ** (i + non_sign_bits - max_exponent_bits) + 1 if signed else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1)) boundaries = torch.linspace(0.1, 1, fraction_items) means = (boundaries[:-1] + boundaries[1:]) / 2.0 + data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist() - data += ((10 ** (-(n -</s>
bitsandbytes.functional/estimate_quantiles
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<19>:<add> The offset for the first and last quantile from 0 and 1. Default: 1/(2*num_quantiles) <del> The offset for the first and last quantile from 0 and 1. Default: 1/512 <20>:<add> num_quantiles : int <add> The number of equally spaced quantiles. <26>:<add> if A.numel() < 256: raise NotImplementedError(f'Quantile estimation needs at least 256 values in the Tensor, but Tensor had only {A.numel()} values.') <add> if num_quantiles > 256: raise NotImplementedError(f"Currently only a maximum of 256 equally spaced quantiles are supported, but the argument num_quantiles={num_quantiles}") <add> if num_quantiles < 256 and offset == 1/(512): <add> # override default arguments <add> offset = 1/(2*num_quantiles) <add> <28>:<add> device = pre_call(A.device) <29>:<del> lib.cestimate_quantiles_fp32( <30>:<add> lib.cestimate_quantiles_fp32(get_ptr(A), get_ptr(out), ct.c_float
# module: bitsandbytes.functional - def estimate_quantiles( - A: Tensor, out: Tensor = None, offset: float = 1 / 512 - ) -> Tensor: + def estimate_quantiles(A: Tensor, out: Tensor = None, offset: float = 1 / 512, num_quantiles=256) -> Tensor: <0> ''' <1> Estimates 256 equidistant quantiles on the input tensor eCDF. <2> <3> Uses SRAM-Quantiles algorithm to quickly estimate 256 equidistant quantiles <4> via the eCDF of the input tensor `A`. This is a fast but approximate algorithm <5> and the extreme quantiles close to 0 and 1 have high variance / large estimation <6> errors. These large errors can be avoided by using the offset variable which trims <7> the distribution. The default offset value of 1/512 ensures minimum entropy encoding -- it <8> trims 1/512 = 0.2% from each side of the distrivution. An offset value of 0.01 to 0.02 <9> usually has a much lower error but is not a minimum entropy encoding. Given an offset <10> of 0.02 equidistance points in the range [0.02, 0.98] are used for the quantiles. <11> <12> Parameters <13> ---------- <14> A : torch.Tensor <15> The input tensor. Any shape. <16> out : torch.Tensor <17> Tensor with the 256 estimated quantiles. <18> offset : float <19> The offset for the first and last quantile from 0 and 1. Default: 1/512 <20> <21> Returns <22> ------- <23> torch.Tensor: <24> The 256 quantiles in float32 datatype. <25> ''' <26> if out is None: out = torch.zeros((256,), dtype=torch.float32, device=A.device) <27> is_on_gpu([A, out]) <28> if A.dtype == torch.float32: <29> lib.cestimate_quantiles_fp32( <30> get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel()) <31> ) <32> elif A</s>
===========below chunk 0=========== # module: bitsandbytes.functional - def estimate_quantiles( - A: Tensor, out: Tensor = None, offset: float = 1 / 512 - ) -> Tensor: + def estimate_quantiles(A: Tensor, out: Tensor = None, offset: float = 1 / 512, num_quantiles=256) -> Tensor: # offset: 1 lib.cestimate_quantiles_fp16( get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel()) ) else: raise NotImplementedError(f"Not supported data type {A.dtype}") return out ===========changed ref 0=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + ===========changed ref 1=========== # module: bitsandbytes.functional def get_special_format_str(): if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() - major, minor = torch.cuda.get_device_capability() if major <= 7: return "col_turing" + if major == 8: - elif major == 8: return "col_ampere" - else: + return "col_turing" - return "col_turing" ===========changed ref 2=========== # module: bitsandbytes.functional def nvidia_transform( A, to_order, from_order="row", out=None, transpose=False, state=None, ld=None, ): if state is None: state = (A.shape, from_order) else: from_order = state[1] if out is None: out, new_state = get_transform_buffer( state[0], A.dtype, A.device, to_order, state[1] ) else: new_state = (state[1], to_order) func = get_transform_func(A.dtype, from_order, to_order, transpose) shape = state[0] if len(shape) == 2: dim1 = ct.c_int32(shape[0]) dim2 = ct.c_int32(shape[1]) elif ld is not None: n = prod(shape) dim1 = prod([shape[i] for i in ld]) dim2 = ct.c_int32(n // dim1) dim1 = ct.c_int32(dim1) else: dim1 = ct.c_int32(shape[0] * shape[1]) dim2 = ct.c_int32(shape[2]) ptr = CUBLAS_Context.get_instance().get_context(A.device) - ptrA = get_ptr(A) - ptrOut = get_ptr(out) func(ptr, get_ptr(A), get_ptr(out), dim1, dim2) return out, new_state ===========changed ref 3=========== # module: bitsandbytes.functional + def create_linear_map(signed=True, total_bits=8, add_zero=True): - def create_linear_map(signed=True): + sign = (-1.0 if signed else 0.0) + total_values = 2**total_bits + if add_zero or total_bits < 8: + # add a zero + # since we simulate less bits by having zeros in the data type, we + # we need to center the quantization around zero and as such lose + # a single value + total_values = (2**total_bits if not signed else 2**total_bits-1) + + values = torch.linspace(sign, 1.0, total_values) + gap = 256 - values.numel() + if gap == 0: + return values - if signed: - return torch.linspace(-1.0, 1.0, 256) else: + l = values.numel()//2 + #return torch.Tensor(values[:l].tolist() + [-1e-6]*((gap//2)-1) + [0]*2 + [1e-6]*((gap//2)-1) + values[l:].tolist()) + return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist()) - return torch.linspace(0.0, 1.0, 256) ===========changed ref 4=========== # module: bitsandbytes.functional + def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): - def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2): e = exponent_bits p = precision_bits + has_sign = 1 if signed else 0 + assert e+p == total_bits-has_sign - assert e+p == 7 # the exponent is biased to 2^(e-1) -1 == 0 evalues = [] pvalues = [] + for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 
2**(exponent_bits-has_sign), 1)): - for i, val in enumerate(range(-((2**(exponent_bits-1))), 2**(exponent_bits-1), 1)): evalues.append(2**val) + values = [] lst = list(itertools.product([0, 1], repeat=precision_bits)) + #for ev in evalues: + bias = 2**(exponent_bits-1)-1 + for evalue in range(2**(exponent_bits)): + for bit_pattern in lst: - for bit_pattern in lst: + value = (1 if evalue != 0 else 0) - value = 1 + for i, pval in enumerate(list(bit_pattern)): - for i, pval in enumerate(list(bit_pattern)): + value += pval*(2**-(i+1)) - value += pval*(2**-(i+1)) + if evalue == 0: + # subnormals + value = value*2**-(bias-1) + else: + # normals + value = value*2**-(evalue-bias-2) + values.append(value) - pvalues.append(value) + if signed: + values.append(-value) + + assert len(</s> ===========changed ref 5=========== # module: bitsandbytes.functional + def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): - def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2): # offset: 1 <s>values.append(value) + if signed: + values.append(-value) + + assert len(values) == 2**total_bits + values.sort() + if total_bits < 8: + gap = 256 - len(values) + for i in range(gap): - assert len(evalues)*len(pvalues) == 128 - values = [] - for ev in evalues: - for pv in pvalues: - values.append(-ev*pv) + values.append(0) - values.append(ev*pv) values.sort() code = torch.Tensor(values) code /= code.max() - code[127] = 0 return code
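Aside (not part of the diff): with the new num_quantiles argument, estimate_quantiles feeds directly into create_quantile_map. A usage sketch based on the signatures in this record (assumes a CUDA tensor with at least 256 elements, per the new size check):

import torch
import bitsandbytes.functional as F

A = torch.randn(1024, 1024, device="cuda")
quantiles = F.estimate_quantiles(A)            # 256 approximate eCDF quantiles (float32)
code = F.create_quantile_map(A, total_bits=8)  # quantiles padded to 256 entries and normalized to [-1, 1]
assert quantiles.numel() == 256 and code.numel() == 256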
bitsandbytes.functional/quantize_blockwise
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<28>:<add> <32>:<del> code = code.to(A.device) <36>:<del> blocksize = (blocksize if A.device.type == 'cpu' else 4096) <45>:<add> assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64] <add> cblocksize = ct.c_int32(blocksize) <add> prev_device = pre_call(A.device) <add> code = code.to(A.device) <del> is_on_gpu([code, A, absmax, out, rand]) <47>:<add> is_on_gpu([code, A, out, absmax, rand]) <add> assert blocksize==4096
# module: bitsandbytes.functional def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor: <0> """ <1> Quantize tensor A in blocks of size 4096 values. <2> <3> Quantizes tensor A by dividing it into blocks of 4096 values. <4> Then the absolute maximum value within these blocks is calculated <5> for the non-linear quantization. <6> <7> Parameters <8> ---------- <9> A : torch.Tensor <10> The input tensor. <11> code : torch.Tensor <12> The quantization map. <13> absmax : torch.Tensor <14> The absmax values. <15> rand : torch.Tensor <16> The tensor for stochastic rounding. <17> out : torch.Tensor <18> The output tensor (8-bit). <19> <20> Returns <21> ------- <22> torch.Tensor: <23> The 8-bit tensor. <24> tuple(torch.Tensor, torch.Tensor): <25> The quantization state to undo the quantization. <26> """ <27> <28> if code is None: <29> if "dynamic" not in name2qmap: <30> name2qmap["dynamic"] = create_dynamic_map().to(A.device) <31> code = name2qmap["dynamic"] <32> code = code.to(A.device) <33> <34> if absmax is None: <35> n = A.numel() <36> blocksize = (blocksize if A.device.type == 'cpu' else 4096) <37> blocks = n // blocksize <38> blocks += 1 if n % blocksize > 0 else 0 <39> absmax = torch.zeros((blocks,), device=A.device) <40> <41> if out is None: <42> out = torch.zeros_like(A, dtype=torch.uint8) <43> <44> if A.device.type != 'cpu': <45> is_on_gpu([code, A, absmax, out, rand]) <46> if rand is not None: <47> assert rand.numel() >= 1024 <48> </s>
===========below chunk 0=========== # module: bitsandbytes.functional def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor: # offset: 1 if A.dtype == torch.float32: lib.cquantize_blockwise_stochastic_fp32(get_ptr(code), get_ptr(A),get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), ct.c_int(A.numel())) elif A.dtype == torch.float16: lib.cquantize_blockwise_stochastic_fp16(get_ptr(code), get_ptr(A),get_ptr(absmax), get_ptr(out), get_ptr(rand), ct.c_int32(rand_offset), ct.c_int(A.numel())) else: raise ValueError( f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}" ) else: if A.dtype == torch.float32: lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out),ct.c_int(A.numel())) elif A.dtype == torch.float16: lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out),ct.c_int(A.numel())) else: raise ValueError( f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}" ) else: # cpu assert rand is None lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel())) return out, (</s> ===========below chunk 1=========== # module: bitsandbytes.functional def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, rand=None, out: Tensor = None, blocksize=4096) -> Tensor: # offset: 2 <s> ct.c_longlong(blocksize), ct.c_longlong(A.numel())) return out, (absmax, code) ===========changed ref 0=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + ===========changed ref 1=========== # module: bitsandbytes.functional def get_special_format_str(): if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() - major, minor = torch.cuda.get_device_capability() if major <= 7: return "col_turing" + if major == 8: - elif major == 8: return "col_ampere" - else: + return "col_turing" - return "col_turing" ===========changed ref 2=========== # module: bitsandbytes.functional def nvidia_transform( A, to_order, from_order="row", out=None, transpose=False, state=None, ld=None, ): if state is None: state = (A.shape, from_order) else: from_order = state[1] if out is None: out, new_state = get_transform_buffer( state[0], A.dtype, A.device, to_order, state[1] ) else: new_state = (state[1], to_order) func = get_transform_func(A.dtype, from_order, to_order, transpose) shape = state[0] if len(shape) == 2: dim1 = ct.c_int32(shape[0]) dim2 = ct.c_int32(shape[1]) elif ld is not None: n = prod(shape) dim1 = prod([shape[i] for i in ld]) dim2 = ct.c_int32(n // dim1) dim1 = ct.c_int32(dim1) else: dim1 = ct.c_int32(shape[0] * shape[1]) dim2 = ct.c_int32(shape[2]) ptr = CUBLAS_Context.get_instance().get_context(A.device) - ptrA = get_ptr(A) - ptrOut = get_ptr(out) func(ptr, get_ptr(A), get_ptr(out), dim1, dim2) return out, new_state ===========changed ref 3=========== # module: bitsandbytes.functional + def create_linear_map(signed=True, total_bits=8, add_zero=True): - def create_linear_map(signed=True): + sign = (-1.0 if signed else 0.0) + total_values = 2**total_bits + if add_zero or total_bits < 8: + # add a zero + # since we simulate less bits by 
having zeros in the data type, we + # we need to center the quantization around zero and as such lose + # a single value + total_values = (2**total_bits if not signed else 2**total_bits-1) + + values = torch.linspace(sign, 1.0, total_values) + gap = 256 - values.numel() + if gap == 0: + return values - if signed: - return torch.linspace(-1.0, 1.0, 256) else: + l = values.numel()//2 + #return torch.Tensor(values[:l].tolist() + [-1e-6]*((gap//2)-1) + [0]*2 + [1e-6]*((gap//2)-1) + values[l:].tolist()) + return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist()) - return torch.linspace(0.0, 1.0, 256)
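Aside (not part of the diff): blockwise quantization stores one absmax scale per block plus an 8-bit code index per value. A round-trip sketch following the quantize_blockwise / dequantize_blockwise signatures in these records (assumes a CUDA device):

import torch
import bitsandbytes.functional as F

A = torch.randn(4096, 4096, device="cuda", dtype=torch.float16)
C, state = F.quantize_blockwise(A, blocksize=4096)        # C: uint8 codes, state: (absmax, code)
A_deq = F.dequantize_blockwise(C, state, blocksize=4096)  # float32 output by default
print((A.float() - A_deq).abs().mean().item())            # small blockwise reconstruction error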
bitsandbytes.functional/dequantize_blockwise
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<30>:<del> code = code.to(A.device) <36>:<add> else: <add> absmax, code = quant_state <39>:<add> device = pre_call(A.device) <add> code = code.to(A.device) <add> if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]: <del> if blocksize not in [2048, 4096]: <40>:<add> raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]") <del> raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048 4096]") <43>:<add> lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel())) <del> lib.cdequantize_blockwise_
# module: bitsandbytes.functional def dequantize_blockwise( A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, code: Tensor = None, out: Tensor = None, blocksize: int = 4096, ) -> Tensor: <0> """ <1> Dequantizes blockwise quantized values. <2> <3> Dequantizes the tensor A with maximum absolute values absmax in <4> blocks of size 4096. <5> <6> Parameters <7> ---------- <8> A : torch.Tensor <9> The input 8-bit tensor. <10> quant_state : tuple(torch.Tensor, torch.Tensor) <11> Tuple of code and absmax values. <12> absmax : torch.Tensor <13> The absmax values. <14> code : torch.Tensor <15> The quantization map. <16> out : torch.Tensor <17> Dequantized output tensor (default: float32) <18> <19> <20> Returns <21> ------- <22> torch.Tensor: <23> Dequantized tensor (default: float32) <24> """ <25> assert quant_state is not None or absmax is not None <26> if code is None and quant_state is None: <27> if "dynamic" not in name2qmap: <28> name2qmap["dynamic"] = create_dynamic_map().to(A.device) <29> code = name2qmap["dynamic"] <30> code = code.to(A.device) <31> <32> if out is None: <33> out = torch.zeros_like(A, dtype=torch.float32) <34> if quant_state is None: <35> quant_state = (absmax, code) <36> <37> <38> if A.device.type != 'cpu': <39> if blocksize not in [2048, 4096]: <40> raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048 4096]") <41> is_on_gpu([A, out]) <42> if out.dtype == torch.float32: <43> lib.cdequantize_blockwise_</s>
===========below chunk 0=========== # module: bitsandbytes.functional def dequantize_blockwise( A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, code: Tensor = None, out: Tensor = None, blocksize: int = 4096, ) -> Tensor: # offset: 1 elif out.dtype == torch.float16: lib.cdequantize_blockwise_fp16(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel())) else: raise ValueError( f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}" ) else: lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel())) return out ===========changed ref 0=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + ===========changed ref 1=========== # module: bitsandbytes.functional def get_special_format_str(): if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() - major, minor = torch.cuda.get_device_capability() if major <= 7: return "col_turing" + if major == 8: - elif major == 8: return "col_ampere" - else: + return "col_turing" - return "col_turing" ===========changed ref 2=========== # module: bitsandbytes.functional def nvidia_transform( A, to_order, from_order="row", out=None, transpose=False, state=None, ld=None, ): if state is None: state = (A.shape, from_order) else: from_order = state[1] if out is None: out, new_state = get_transform_buffer( state[0], A.dtype, A.device, to_order, state[1] ) else: new_state = (state[1], to_order) func = get_transform_func(A.dtype, from_order, to_order, transpose) shape = state[0] if len(shape) == 2: dim1 = ct.c_int32(shape[0]) dim2 = ct.c_int32(shape[1]) elif ld is not None: n = prod(shape) dim1 = prod([shape[i] for i in ld]) dim2 = ct.c_int32(n // dim1) dim1 = ct.c_int32(dim1) else: dim1 = ct.c_int32(shape[0] * shape[1]) dim2 = ct.c_int32(shape[2]) ptr = CUBLAS_Context.get_instance().get_context(A.device) - ptrA = get_ptr(A) - ptrOut = get_ptr(out) func(ptr, get_ptr(A), get_ptr(out), dim1, dim2) return out, new_state ===========changed ref 3=========== # module: bitsandbytes.functional + def create_linear_map(signed=True, total_bits=8, add_zero=True): - def create_linear_map(signed=True): + sign = (-1.0 if signed else 0.0) + total_values = 2**total_bits + if add_zero or total_bits < 8: + # add a zero + # since we simulate less bits by having zeros in the data type, we + # we need to center the quantization around zero and as such lose + # a single value + total_values = (2**total_bits if not signed else 2**total_bits-1) + + values = torch.linspace(sign, 1.0, total_values) + gap = 256 - values.numel() + if gap == 0: + return values - if signed: - return torch.linspace(-1.0, 1.0, 256) else: + l = values.numel()//2 + #return torch.Tensor(values[:l].tolist() + [-1e-6]*((gap//2)-1) + [0]*2 + [1e-6]*((gap//2)-1) + values[l:].tolist()) + return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist()) - return torch.linspace(0.0, 1.0, 256) ===========changed ref 4=========== # module: bitsandbytes.functional + def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): - def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2): e = 
exponent_bits p = precision_bits + has_sign = 1 if signed else 0 + assert e+p == total_bits-has_sign - assert e+p == 7 # the exponent is biased to 2^(e-1) -1 == 0 evalues = [] pvalues = [] + for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 2**(exponent_bits-has_sign), 1)): - for i, val in enumerate(range(-((2**(exponent_bits-1))), 2**(exponent_bits-1), 1)): evalues.append(2**val) + values = [] lst = list(itertools.product([0, 1], repeat=precision_bits)) + #for ev in evalues: + bias = 2**(exponent_bits-1)-1 + for evalue in range(2**(exponent_bits)): + for bit_pattern in lst: - for bit_pattern in lst: + value = (1 if evalue != 0 else 0) - value = 1 + for i, pval in enumerate(list(bit_pattern)): - for i, pval in enumerate(list(bit_pattern)): + value += pval*(2**-(i+1)) - value += pval*(2**-(i+1)) + if evalue == 0: + # subnormals + value = value*2**-(bias-1) + else: + # normals + value = value*2**-(evalue-bias-2) + values.append(value) - pvalues.append(value) + if signed: + values.append(-value) + + assert len(</s>
bitsandbytes.functional/histogram_scatter_add_2d
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<13>:<add> is_on_gpu([histogram, index1, index2, source]) <del> is_on_gpu([histogram, index1, index2d, source])
# module: bitsandbytes.functional def histogram_scatter_add_2d( histogram: Tensor, index1: Tensor, index2: Tensor, source: Tensor ): <0> assert len(histogram.shape) == 2 <1> assert histogram.dtype == torch.float32 <2> assert source.dtype == torch.float32 <3> assert index1.dtype == torch.int32 <4> assert index2.dtype == torch.int32 <5> <6> assert histogram.device.type == "cuda" <7> assert index1.device.type == "cuda" <8> assert index2.device.type == "cuda" <9> assert source.device.type == "cuda" <10> <11> maxdim1 = ct.c_int32(histogram.shape[0]) <12> n = ct.c_int32(index1.numel()) <13> is_on_gpu([histogram, index1, index2d, source]) <14> lib.chistogram_scatter_add_2d(get_ptr(histogram), get_ptr(index1), get_ptr(index2), get_ptr(source), maxdim1, n) <15>
===========changed ref 0=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + ===========changed ref 1=========== # module: bitsandbytes.functional def get_special_format_str(): if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() - major, minor = torch.cuda.get_device_capability() if major <= 7: return "col_turing" + if major == 8: - elif major == 8: return "col_ampere" - else: + return "col_turing" - return "col_turing" ===========changed ref 2=========== # module: bitsandbytes.functional def nvidia_transform( A, to_order, from_order="row", out=None, transpose=False, state=None, ld=None, ): if state is None: state = (A.shape, from_order) else: from_order = state[1] if out is None: out, new_state = get_transform_buffer( state[0], A.dtype, A.device, to_order, state[1] ) else: new_state = (state[1], to_order) func = get_transform_func(A.dtype, from_order, to_order, transpose) shape = state[0] if len(shape) == 2: dim1 = ct.c_int32(shape[0]) dim2 = ct.c_int32(shape[1]) elif ld is not None: n = prod(shape) dim1 = prod([shape[i] for i in ld]) dim2 = ct.c_int32(n // dim1) dim1 = ct.c_int32(dim1) else: dim1 = ct.c_int32(shape[0] * shape[1]) dim2 = ct.c_int32(shape[2]) ptr = CUBLAS_Context.get_instance().get_context(A.device) - ptrA = get_ptr(A) - ptrOut = get_ptr(out) func(ptr, get_ptr(A), get_ptr(out), dim1, dim2) return out, new_state ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: 
+ cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 14=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 17=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 19=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 20=========== # module: bitsandbytes.cuda_setup.main + def is_cublasLt_compatible(cc): + has_cublaslt = False + if cc is not None: + cc_major, cc_minor = cc.split('.') + if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True) + else: + has_cublaslt = True + return has_cublaslt +
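Aside (not part of the diff): as an illustrative assumption about the semantics the CUDA kernel is expected to implement (not the library's actual code), histogram_scatter_add_2d behaves like a scatter-add into a flattened 2-D histogram:

import torch

def histogram_scatter_add_2d_ref(histogram, index1, index2, source):
    # histogram[index1[i], index2[i]] += source[i], expressed as a flat scatter_add_.
    flat_idx = index1.long() * histogram.shape[1] + index2.long()
    histogram.view(-1).scatter_add_(0, flat_idx, source)
    return histogram

hist = torch.zeros(4, 4)
i1 = torch.tensor([0, 0, 3], dtype=torch.int32)
i2 = torch.tensor([1, 1, 2], dtype=torch.int32)
src = torch.tensor([1.0, 2.0, 5.0])
print(histogram_scatter_add_2d_ref(hist, i1, i2, src))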
bitsandbytes.functional/transform
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<14>:<del> ptrA = get_ptr(A) <15>:<del> ptrOut = get_ptr(out)
# module: bitsandbytes.functional def transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None): <0> prev_device = pre_call(A.device) <1> if state is None: state = (A.shape, from_order) <2> else: from_order = state[1] <3> if out is None: out, new_state = get_transform_buffer(state[0], A.dtype, A.device, to_order, state[1], transpose) <4> else: new_state = (state[0], to_order) # (shape, order) <5> <6> shape = state[0] <7> if len(shape) == 2: <8> dim1 = ct.c_int32(shape[0]) <9> dim2 = ct.c_int32(shape[1]) <10> else: <11> dim1 = ct.c_int32(shape[0] * shape[1]) <12> dim2 = ct.c_int32(shape[2]) <13> <14> ptrA = get_ptr(A) <15> ptrOut = get_ptr(out) <16> is_on_gpu([A, out]) <17> if to_order == 'col32': <18> if transpose: <19> lib.ctransform_row2col32T(get_ptr(A), get_ptr(out), dim1, dim2) <20> else: <21> lib.ctransform_row2col32(get_ptr(A), get_ptr(out), dim1, dim2) <22> elif to_order == "col_turing": <23> if transpose: <24> lib.ctransform_row2turingT(get_ptr(A), get_ptr(out), dim1, dim2) <25> else: <26> lib.ctransform_row2turing(get_ptr(A), get_ptr(out), dim1, dim2) <27> elif to_order == "col_ampere": <28> if transpose: <29> lib.ctransform_row2ampereT(get_ptr</s>
===========below chunk 0=========== # module: bitsandbytes.functional def transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None): # offset: 1 else: lib.ctransform_row2ampere(get_ptr(A), get_ptr(out), dim1, dim2) elif to_order == "row": if from_order == "col_turing": lib.ctransform_turing2row(get_ptr(A), get_ptr(out), dim1, dim2) elif from_order == "col_ampere": lib.ctransform_ampere2row(get_ptr(A), get_ptr(out), dim1, dim2) else: raise NotImplementedError(f'Transform function not implemented: From {from_order} to {to_order}') post_call(prev_device) return out, new_state ===========changed ref 0=========== # module: bitsandbytes.functional def histogram_scatter_add_2d( histogram: Tensor, index1: Tensor, index2: Tensor, source: Tensor ): assert len(histogram.shape) == 2 assert histogram.dtype == torch.float32 assert source.dtype == torch.float32 assert index1.dtype == torch.int32 assert index2.dtype == torch.int32 assert histogram.device.type == "cuda" assert index1.device.type == "cuda" assert index2.device.type == "cuda" assert source.device.type == "cuda" maxdim1 = ct.c_int32(histogram.shape[0]) n = ct.c_int32(index1.numel()) + is_on_gpu([histogram, index1, index2, source]) - is_on_gpu([histogram, index1, index2d, source]) lib.chistogram_scatter_add_2d(get_ptr(histogram), get_ptr(index1), get_ptr(index2), get_ptr(source), maxdim1, n) ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 2=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 12=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - 
cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 15=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 17=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 18=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + ===========changed ref 19=========== # module: bitsandbytes.functional def get_special_format_str(): if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() - major, minor = torch.cuda.get_device_capability() if major <= 7: return "col_turing" + if major == 8: - elif major == 8: return "col_ampere" - else: + return "col_turing" - return "col_turing"
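Aside (not part of the diff): a usage sketch for the layout transforms, assuming the transform() signature above and a CUDA int8 tensor:

import torch
import bitsandbytes.functional as F

A = torch.randint(-127, 127, (128, 128), device="cuda", dtype=torch.int8)
A_col32, state32 = F.transform(A, to_order="col32")        # row-major -> col32 tile format
A_turing, state_t = F.transform(A, to_order="col_turing")  # row-major -> Turing tile format
# Each call returns the transformed tensor plus a (shape, order) state for later calls.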
bitsandbytes.optim.optimizer/Optimizer8bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__(params, defaults) <del> super(Optimizer8bit, self).__init__(params, defaults) <5>:<add> self.non_castable_tensor_keys = { <del> self.non_castable_tensor_keys = set( <6>:<del> [ <19>:<del> ] <20>:<add> } <del> )
# module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __init__(self, params, defaults, optim_bits=32): <0> super(Optimizer8bit, self).__init__(params, defaults) <1> self.initialized = False <2> self.name2qmap = {} <3> <4> self.mng = GlobalOptimManager.get_instance() <5> self.non_castable_tensor_keys = set( <6> [ <7> "qmap1", <8> "qmap2", <9> "max1", <10> "max2", <11> "new_max1", <12> "new_max2", <13> "state1", <14> "state2", <15> "gnorm_vec", <16> "absmax1", <17> "absmax2", <18> "unorm_vec", <19> ] <20> ) <21> <22> if optim_bits == 8: <23> self.fill_qmap() <24>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer GlobalOptimManager() at: bitsandbytes.optim.optimizer.GlobalOptimManager _instance = None get_instance() at: bitsandbytes.optim.optimizer.Optimizer8bit.step self.initialized = True at: torch.optim.optimizer.Optimizer OptimizerPreHook: TypeAlias = Callable[[Self, Args, Kwargs], Optional[Tuple[Args, Kwargs]]] # type: ignore[misc] OptimizerPostHook: TypeAlias = Callable[[Self, Args, Kwargs], None] # type: ignore[misc] _optimizer_step_pre_hooks: Dict[int, OptimizerPreHook] _optimizer_step_post_hooks: Dict[int, OptimizerPostHook] _optimizer_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]' _optimizer_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]' _optimizer_load_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]' _optimizer_load_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]' __init__(self, params: params_t, defaults: Dict[str, Any]) -> None __init__(params: params_t, defaults: Dict[str, Any]) -> None ===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 1=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 11=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + 
warn(msg) + else: + print(msg) + ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 14=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 16=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 17=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + ===========changed ref 18=========== # module: bitsandbytes.functional def get_special_format_str(): if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() - major, minor = torch.cuda.get_device_capability() if major <= 7: return "col_turing" + if major == 8: - elif major == 8: return "col_ampere" - else: + return "col_turing" - return "col_turing" ===========changed ref 19=========== # module: bitsandbytes.cuda_setup.main + def is_cublasLt_compatible(cc): + has_cublaslt = False + if cc is not None: + cc_major, cc_minor = cc.split('.') + if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True) + else: + has_cublaslt = True + return has_cublaslt +
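Aside (not part of the diff): non_castable_tensor_keys lists the optimizer-state tensors (quantization maps, max/absmax statistics, 8-bit states) that must keep their dtype when the rest of the state is cast, and optim_bits=8 is what triggers self.fill_qmap() above. A usage sketch of the 8-bit path through the assumed public API (not shown in this record):

import torch
import bitsandbytes as bnb

model = torch.nn.Linear(1024, 1024).cuda()
optimizer = bnb.optim.Adam(model.parameters(), lr=1e-3, optim_bits=8)  # 8-bit optimizer states

loss = model(torch.randn(16, 1024, device="cuda")).sum()
loss.backward()
optimizer.step()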
bitsandbytes.optim.optimizer/Optimizer8bit.__setstate__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__setstate__(state) <del> super(Optimizer8bit, self).__setstate__(state)
# module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): <0> super(Optimizer8bit, self).__setstate__(state) <1>
===========unchanged ref 0=========== at: torch.optim.optimizer.Optimizer _disable_dynamo(fn=None) state_dict(fn, /, *args, fn, **kwargs) _disable_dynamo(fn=None) load_state_dict(fn, /, *args, fn, **kwargs) ===========changed ref 0=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __init__(self, params, defaults, optim_bits=32): + super().__init__(params, defaults) - super(Optimizer8bit, self).__init__(params, defaults) self.initialized = False self.name2qmap = {} self.mng = GlobalOptimManager.get_instance() + self.non_castable_tensor_keys = { - self.non_castable_tensor_keys = set( - [ "qmap1", "qmap2", "max1", "max2", "new_max1", "new_max2", "state1", "state2", "gnorm_vec", "absmax1", "absmax2", "unorm_vec", - ] + } - ) if optim_bits == 8: self.fill_qmap() ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 2=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 12=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + 
===========changed ref 15=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 17=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 18=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + ===========changed ref 19=========== # module: bitsandbytes.functional def get_special_format_str(): if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() - major, minor = torch.cuda.get_device_capability() if major <= 7: return "col_turing" + if major == 8: - elif major == 8: return "col_ampere" - else: + return "col_turing" - return "col_turing" ===========changed ref 20=========== # module: bitsandbytes.cuda_setup.main + def is_cublasLt_compatible(cc): + has_cublaslt = False + if cc is not None: + cc_major, cc_minor = cc.split('.') + if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True) + else: + has_cublaslt = True + return has_cublaslt +
bitsandbytes.optim.optimizer/Optimizer8bit.init_state
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> raise NotImplementedError("init_state method needs to be overridden") <del> raise NotImplementedError(f"init_state method needs to be overidden")
# module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): <0> raise NotImplementedError(f"init_state method needs to be overidden") <1>
===========changed ref 0=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 1=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __init__(self, params, defaults, optim_bits=32): + super().__init__(params, defaults) - super(Optimizer8bit, self).__init__(params, defaults) self.initialized = False self.name2qmap = {} self.mng = GlobalOptimManager.get_instance() + self.non_castable_tensor_keys = { - self.non_castable_tensor_keys = set( - [ "qmap1", "qmap2", "max1", "max2", "new_max1", "new_max2", "state1", "state2", "gnorm_vec", "absmax1", "absmax2", "unorm_vec", - ] + } - ) if optim_bits == 8: self.fill_qmap() ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 13=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / 
CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 18=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 19=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + ===========changed ref 20=========== # module: bitsandbytes.functional def get_special_format_str(): if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() - major, minor = torch.cuda.get_device_capability() if major <= 7: return "col_turing" + if major == 8: - elif major == 8: return "col_ampere" - else: + return "col_turing" - return "col_turing" ===========changed ref 21=========== # module: bitsandbytes.cuda_setup.main + def is_cublasLt_compatible(cc): + has_cublaslt = False + if cc is not None: + cc_major, cc_minor = cc.split('.') + if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True) + else: + has_cublaslt = True + return has_cublaslt +
bitsandbytes.optim.optimizer/Optimizer8bit.update_step
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> "The update_step method needs to be overridden" <del> f"The update_step method needs to be overidden"
# module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): <0> raise NotImplementedError( <1> f"The update_step method needs to be overidden" <2> ) <3>
===========changed ref 0=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 1=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 2=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __init__(self, params, defaults, optim_bits=32): + super().__init__(params, defaults) - super(Optimizer8bit, self).__init__(params, defaults) self.initialized = False self.name2qmap = {} self.mng = GlobalOptimManager.get_instance() + self.non_castable_tensor_keys = { - self.non_castable_tensor_keys = set( - [ "qmap1", "qmap2", "max1", "max2", "new_max1", "new_max2", "state1", "state2", "gnorm_vec", "absmax1", "absmax2", "unorm_vec", - ] + } - ) if optim_bits == 8: self.fill_qmap() ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 14=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in 
self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 17=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 19=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 20=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + ===========changed ref 21=========== # module: bitsandbytes.functional def get_special_format_str(): if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() - major, minor = torch.cuda.get_device_capability() if major <= 7: return "col_turing" + if major == 8: - elif major == 8: return "col_ampere" - else: + return "col_turing" - return "col_turing" ===========changed ref 22=========== # module: bitsandbytes.cuda_setup.main + def is_cublasLt_compatible(cc): + has_cublaslt = False + if cc is not None: + cc_major, cc_minor = cc.split('.') + if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True) + else: + has_cublaslt = True + return has_cublaslt +
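The two entries above apply the same fix to the two abstract hooks of Optimizer8bit: the misspelling "overidden" becomes "overridden" and the pointless f-string prefix is dropped, since neither message interpolates anything. As a rough illustration of the pattern these hooks follow, here is a minimal, self-contained sketch; the base class mirrors the methods shown above, while ToyOptimizer is an invented subclass, not part of bitsandbytes:

# Sketch of the override pattern: the base class only declares the hooks,
# concrete optimizers (Optimizer1State/Optimizer2State in the library)
# supply the real state allocation and update kernels.
class BaseOptimizer:
    def init_state(self, group, p, gindex, pindex):
        raise NotImplementedError("init_state method needs to be overridden")

    def update_step(self, group, p, gindex, pindex):
        raise NotImplementedError("The update_step method needs to be overridden")

class ToyOptimizer(BaseOptimizer):
    def init_state(self, group, p, gindex, pindex):
        # A real subclass would allocate its state1/state2 buffers here.
        self.state = {}

    def update_step(self, group, p, gindex, pindex):
        # A real subclass would run the quantized update here.
        pass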
bitsandbytes.optim.optimizer/Optimizer2State.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> raise ValueError(f"Invalid learning rate: {lr}") <del> raise ValueError("Invalid learning rate: {}".format(lr)) <3>:<add> raise ValueError(f"Invalid epsilon value: {eps}") <del> raise ValueError("Invalid epsilon value: {}".format(eps)) <15>:<add> f"Invalid weight_decay value: {weight_decay}" <del> "Invalid weight_decay value: {}".format(weight_decay) <18>:<add> super().__init__(params, defaults, optim_bits) <del> super(Optimizer2State, self).__init__(params, defaults, optim_bits)
<s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): <0> if not 0.0 <= lr: <1> raise ValueError("Invalid learning rate: {}".format(lr)) <2> if not 0.0 <= eps: <3> raise ValueError("Invalid epsilon value: {}".format(eps)) <4> if isinstance(betas, str): <5> # format: '(beta1, beta2)' <6> betas = betas.replace("(", "").replace(")", "").strip().split(",") <7> betas = [float(b) for b in betas] <8> for i in range(len(betas)): <9> if not 0.0 <= betas[i] < 1.0: <10> raise ValueError( <11> f"Invalid beta parameter at index {i}: {betas[i]}" <12> ) <13> if not 0.0 <= weight_decay: <14> raise ValueError( <15> "Invalid weight_decay value: {}".format(weight_decay) <16> ) <17> defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) <18> super(Optimizer2State, self).__init__(params, defaults, optim_bits) <19> <20> if args is None: <21> args = {} <22> args["optim_bits"] = optim_bits <23> args["percentile_clipping"] = 100 <24> args["min_8bit_size"] = min_8bit_size <25> args["percentile_clipping"] = percentile_clipping <26> args["block_wise"] = block_wise <27> args["max_unorm"] = max_unorm <28> args</s>
===========below chunk 0=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer MockArgs(initial_data) at: bitsandbytes.optim.optimizer.Optimizer8bit __init__(params, defaults, optim_bits=32) at: torch.autograd.grad_mode no_grad() ===========changed ref 0=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __init__(self, params, defaults, optim_bits=32): + super().__init__(params, defaults) - super(Optimizer8bit, self).__init__(params, defaults) self.initialized = False self.name2qmap = {} self.mng = GlobalOptimManager.get_instance() + self.non_castable_tensor_keys = { - self.non_castable_tensor_keys = set( - [ "qmap1", "qmap2", "max1", "max2", "new_max1", "new_max2", "state1", "state2", "gnorm_vec", "absmax1", "absmax2", "unorm_vec", - ] + } - ) if optim_bits == 8: self.fill_qmap() ===========changed ref 1=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 2=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 3=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 10=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 
13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 15=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 18=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 19=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 20=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 21=========== # module: bitsandbytes.functional + def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q +
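One detail of the constructor above is worth a worked example: Optimizer2State accepts betas either as a tuple or as a string of the form '(beta1, beta2)'. The parsing lines below are the ones shown in the diff; the input value is illustrative only:

# Worked example of the string form accepted by Optimizer2State.__init__.
betas = "(0.9, 0.995)"
if isinstance(betas, str):
    betas = betas.replace("(", "").replace(")", "").strip().split(",")
    betas = [float(b) for b in betas]
print(betas)  # -> [0.9, 0.995]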
bitsandbytes.optim.optimizer/Optimizer1State.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> raise ValueError(f"Invalid learning rate: {lr}") <del> raise ValueError("Invalid learning rate: {}".format(lr)) <3>:<add> raise ValueError(f"Invalid epsilon value: {eps}") <del> raise ValueError("Invalid epsilon value: {}".format(eps)) <11>:<add> f"Invalid weight_decay value: {weight_decay}" <del> "Invalid weight_decay value: {}".format(weight_decay) <14>:<add> super().__init__(params, defaults, optim_bits) <del> super(Optimizer1State, self).__init__(params, defaults, optim_bits)
<s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): <0> if not 0.0 <= lr: <1> raise ValueError("Invalid learning rate: {}".format(lr)) <2> if not 0.0 <= eps: <3> raise ValueError("Invalid epsilon value: {}".format(eps)) <4> for i in range(len(betas)): <5> if not 0.0 <= betas[i] < 1.0: <6> raise ValueError( <7> f"Invalid beta parameter at index {i}: {betas[i]}" <8> ) <9> if not 0.0 <= weight_decay: <10> raise ValueError( <11> "Invalid weight_decay value: {}".format(weight_decay) <12> ) <13> defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) <14> super(Optimizer1State, self).__init__(params, defaults, optim_bits) <15> <16> if args is None: <17> args = {} <18> args["optim_bits"] = optim_bits <19> args["percentile_clipping"] = 100 <20> args["min_8bit_size"] = min_8bit_size <21> args["percentile_clipping"] = percentile_clipping <22> args["block_wise"] = block_wise <23> args["max_unorm"] = max_unorm <24> args["skip_zeros"] = skip_zeros <25> <26> self.args = MockArgs(args) <27> else: <28> self.args = args <29> <30> self.optimizer_name = optimizer_name <31>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer MockArgs(initial_data) at: bitsandbytes.optim.optimizer.Optimizer8bit __init__(params, defaults, optim_bits=32) at: torch.autograd.grad_mode no_grad() ===========changed ref 0=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __init__(self, params, defaults, optim_bits=32): + super().__init__(params, defaults) - super(Optimizer8bit, self).__init__(params, defaults) self.initialized = False self.name2qmap = {} self.mng = GlobalOptimManager.get_instance() + self.non_castable_tensor_keys = { - self.non_castable_tensor_keys = set( - [ "qmap1", "qmap2", "max1", "max2", "new_max1", "new_max2", "state1", "state2", "gnorm_vec", "absmax1", "absmax2", "unorm_vec", - ] + } - ) if optim_bits == 8: self.fill_qmap() ===========changed ref 1=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 2=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 3=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 4=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if isinstance(betas, str): # format: '(beta1, beta2)' betas = betas.replace("(", "").replace(")", "").strip().split(",") betas = [float(b) for b in betas] for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer2State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args</s> ===========changed ref 5=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s> args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 
6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 12=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 17=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance -
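The ground-truth edits in this and the previous entry follow one modernization pattern rather than changing behaviour: str.format() calls become f-strings, super(Class, self) becomes bare super(), and (in the Optimizer8bit constructor shown in the changed refs) set([...]) becomes a set literal. A short before/after sketch of the idioms, with illustrative values:

lr = -1.0
# before: "Invalid learning rate: {}".format(lr)
msg = f"Invalid learning rate: {lr}"

# before: set(["qmap1", "qmap2", "state1", "state2"])
non_castable = {"qmap1", "qmap2", "state1", "state2"}

class Base:
    def __init__(self, defaults):
        self.defaults = defaults

class Child(Base):
    def __init__(self, defaults):
        # before: super(Child, self).__init__(defaults)
        super().__init__(defaults)

Child({"lr": 1e-3})  # both spellings of the super() call behave identically here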
bitsandbytes.optim.adagrad/Adagrad.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> raise ValueError(f"Invalid learning rate: {lr}") <del> raise ValueError("Invalid learning rate: {}".format(lr)) <4>:<add> f"Invalid weight_decay value: {weight_decay}" <del> "Invalid weight_decay value: {}".format(weight_decay) <7>:<add> raise ValueError(f"Invalid epsilon value: {eps}") <del> raise ValueError("Invalid epsilon value: {}".format(eps)) <12>:<add> super().__init__( <del> super(Adagrad, self).__init__(
<s>andbytes.optim.adagrad class Adagrad(Optimizer1State): def __init__( self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> if not 0.0 <= lr: <1> raise ValueError("Invalid learning rate: {}".format(lr)) <2> if not 0.0 <= weight_decay: <3> raise ValueError( <4> "Invalid weight_decay value: {}".format(weight_decay) <5> ) <6> if not 0.0 <= eps: <7> raise ValueError("Invalid epsilon value: {}".format(eps)) <8> if initial_accumulator_value != 0.0: <9> raise ValueError("Initial accumulator value != 0.0 not supported!") <10> if lr_decay != 0.0: <11> raise ValueError("Lr Decay != 0.0 not supported!") <12> super(Adagrad, self).__init__( <13> "adagrad", <14> params, <15> lr, <16> (0.0, 0.0), <17> eps, <18> weight_decay, <19> optim_bits, <20> args, <21> min_8bit_size, <22> percentile_clipping, <23> block_wise, <24> ) <25>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer.Optimizer1State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer1State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s>_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer 
class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } +
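The Adagrad wrapper above keeps the torch.optim.Adagrad signature but rejects the options its backend does not handle (non-zero initial_accumulator_value and lr_decay) before delegating to Optimizer1State. A hedged usage sketch, assuming bitsandbytes is installed with a CUDA-capable PyTorch and that the class is exported as bnb.optim.Adagrad; the linear module and data are placeholders:

import torch
import bitsandbytes as bnb

model = torch.nn.Linear(64, 64).cuda()                 # placeholder module
opt = bnb.optim.Adagrad(model.parameters(), lr=1e-2)   # eps=1e-10, weight_decay=0 by default

loss = model(torch.randn(8, 64, device="cuda")).sum()
loss.backward()
opt.step()
opt.zero_grad()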
bitsandbytes.optim.adagrad/Adagrad8bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> raise ValueError(f"Invalid learning rate: {lr}") <del> raise ValueError("Invalid learning rate: {}".format(lr)) <4>:<add> f"Invalid weight_decay value: {weight_decay}" <del> "Invalid weight_decay value: {}".format(weight_decay) <7>:<add> raise ValueError(f"Invalid epsilon value: {eps}") <del> raise ValueError("Invalid epsilon value: {}".format(eps)) <13>:<add> super().__init__( <del> super(Adagrad8bit, self).__init__(
<s>.optim.adagrad class Adagrad8bit(Optimizer1State): def __init__( self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10, optim_bits=8, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> if not 0.0 <= lr: <1> raise ValueError("Invalid learning rate: {}".format(lr)) <2> if not 0.0 <= weight_decay: <3> raise ValueError( <4> "Invalid weight_decay value: {}".format(weight_decay) <5> ) <6> if not 0.0 <= eps: <7> raise ValueError("Invalid epsilon value: {}".format(eps)) <8> if initial_accumulator_value != 0.0: <9> raise ValueError("Initial accumulator value != 0.0 not supported!") <10> if lr_decay != 0.0: <11> raise ValueError("Lr Decay != 0.0 not supported!") <12> assert block_wise <13> super(Adagrad8bit, self).__init__( <14> "adagrad", <15> params, <16> lr, <17> (0.0, 0.0), <18> eps, <19> weight_decay, <20> 8, <21> args, <22> min_8bit_size, <23> percentile_clipping, <24> block_wise, <25> ) <26>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer.Optimizer1State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer1State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s>_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== <s>andbytes.optim.adagrad class Adagrad(Optimizer1State): def __init__( self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if initial_accumulator_value != 0.0: raise ValueError("Initial accumulator value != 0.0 not supported!") if lr_decay != 0.0: raise ValueError("Lr Decay != 0.0 not supported!") + super().__init__( - super(Adagrad, self).__init__( "adagrad", params, lr, (0.0, 0.0), eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: 
bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) +
bitsandbytes.optim.adagrad/Adagrad32bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> raise ValueError(f"Invalid learning rate: {lr}") <del> raise ValueError("Invalid learning rate: {}".format(lr)) <4>:<add> f"Invalid weight_decay value: {weight_decay}" <del> "Invalid weight_decay value: {}".format(weight_decay) <7>:<add> raise ValueError(f"Invalid epsilon value: {eps}") <del> raise ValueError("Invalid epsilon value: {}".format(eps)) <12>:<add> super().__init__( <del> super(Adagrad32bit, self).__init__(
<s>.optim.adagrad class Adagrad32bit(Optimizer1State): def __init__( self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> if not 0.0 <= lr: <1> raise ValueError("Invalid learning rate: {}".format(lr)) <2> if not 0.0 <= weight_decay: <3> raise ValueError( <4> "Invalid weight_decay value: {}".format(weight_decay) <5> ) <6> if not 0.0 <= eps: <7> raise ValueError("Invalid epsilon value: {}".format(eps)) <8> if initial_accumulator_value != 0.0: <9> raise ValueError("Initial accumulator value != 0.0 not supported!") <10> if lr_decay != 0.0: <11> raise ValueError("Lr Decay != 0.0 not supported!") <12> super(Adagrad32bit, self).__init__( <13> "adagrad", <14> params, <15> lr, <16> (0.0, 0.0), <17> eps, <18> weight_decay, <19> 32, <20> args, <21> min_8bit_size, <22> percentile_clipping, <23> block_wise, <24> ) <25>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer.Optimizer1State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer1State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s>_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== <s>.optim.adagrad class Adagrad8bit(Optimizer1State): def __init__( self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10, optim_bits=8, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if initial_accumulator_value != 0.0: raise ValueError("Initial accumulator value != 0.0 not supported!") if lr_decay != 0.0: raise ValueError("Lr Decay != 0.0 not supported!") assert block_wise + super().__init__( - super(Adagrad8bit, self).__init__( "adagrad", params, lr, (0.0, 0.0), eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== <s>andbytes.optim.adagrad class Adagrad(Optimizer1State): def __init__( self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): if 
not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if initial_accumulator_value != 0.0: raise ValueError("Initial accumulator value != 0.0 not supported!") if lr_decay != 0.0: raise ValueError("Lr Decay != 0.0 not supported!") + super().__init__( - super(Adagrad, self).__init__( "adagrad", params, lr, (0.0, 0.0), eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) +
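Adagrad8bit and Adagrad32bit above differ from Adagrad only in the optim_bits value they hard-code when delegating (8 and 32), with Adagrad8bit additionally asserting block_wise. A small, hypothetical helper for choosing between them (the helper itself is not part of the library; usage assumptions as in the previous sketch):

import bitsandbytes as bnb

def make_adagrad(params, lr=1e-2, use_8bit=True):
    # The 8-bit variant keeps the Adagrad accumulator in quantized form to
    # save memory; the 32-bit variant keeps it in full precision.
    cls = bnb.optim.Adagrad8bit if use_8bit else bnb.optim.Adagrad32bit
    return cls(params, lr=lr)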
bitsandbytes.optim.adamw/AdamW.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(AdamW, self).__init__(
<s>adamw class AdamW(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> super(AdamW, self).__init__( <1> "adam", <2> params, <3> lr, <4> betas, <5> eps, <6> weight_decay, <7> optim_bits, <8> args, <9> min_8bit_size, <10> percentile_clipping, <11> block_wise, <12> ) <13>
===========unchanged ref 0=========== at: bitsandbytes.optim.adamw AdamW(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True) at: bitsandbytes.optim.optimizer.Optimizer2State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if isinstance(betas, str): # format: '(beta1, beta2)' betas = betas.replace("(", "").replace(")", "").strip().split(",") betas = [float(b) for b in betas] for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer2State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s> args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # module: bitsandbytes.cextension - class 
CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance -
bitsandbytes.optim.adamw/AdamW8bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(AdamW8bit, self).__init__(
<s> bitsandbytes.optim.adamw class AdamW8bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> super(AdamW8bit, self).__init__( <1> "adam", <2> params, <3> lr, <4> betas, <5> eps, <6> weight_decay, <7> 8, <8> args, <9> min_8bit_size, <10> percentile_clipping, <11> block_wise, <12> ) <13>
===========unchanged ref 0=========== at: bitsandbytes.optim.adamw AdamW8bit(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True) at: bitsandbytes.optim.optimizer.Optimizer2State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if isinstance(betas, str): # format: '(beta1, beta2)' betas = betas.replace("(", "").replace(")", "").strip().split(",") betas = [float(b) for b in betas] for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer2State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s> args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== <s>adamw class AdamW(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(AdamW, self).__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call 
get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 15=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" )
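The AdamW entries in this commit are thin wrappers that forward to Optimizer2State under the name "adam" with a weight_decay default of 1e-2, differing only in the optim_bits they pass (32, 8, and 32 respectively). A hedged drop-in sketch replacing torch.optim.AdamW with the 8-bit variant; the module and training step are placeholders:

import torch
import bitsandbytes as bnb

model = torch.nn.Linear(128, 128).cuda()               # placeholder module
# optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
optimizer = bnb.optim.AdamW8bit(model.parameters(), lr=1e-3, weight_decay=1e-2)

out = model(torch.randn(4, 128, device="cuda"))
out.sum().backward()
optimizer.step()
optimizer.zero_grad()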
bitsandbytes.optim.adamw/AdamW32bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(AdamW32bit, self).__init__(
<s> bitsandbytes.optim.adamw class AdamW32bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> super(AdamW32bit, self).__init__( <1> "adam", <2> params, <3> lr, <4> betas, <5> eps, <6> weight_decay, <7> 32, <8> args, <9> min_8bit_size, <10> percentile_clipping, <11> block_wise, <12> ) <13>
===========unchanged ref 0=========== at: bitsandbytes.optim.adamw AdamW32bit(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True) at: bitsandbytes.optim.optimizer.Optimizer2State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if isinstance(betas, str): # format: '(beta1, beta2)' betas = betas.replace("(", "").replace(")", "").strip().split(",") betas = [float(b) for b in betas] for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer2State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s> args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== <s> bitsandbytes.optim.adamw class AdamW8bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(AdamW8bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== <s>adamw class AdamW(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(AdamW, self).__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + 
===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 10=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} +
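Most of the optimizer records in this section carry the same mechanical change: the Python 2-style super(Class, self).__init__(...) call is replaced by the zero-argument super().__init__(...) form. A throwaway sketch of the equivalence (class names here are made up for illustration only):

class Base:
    def __init__(self, tag):
        self.tag = tag

class OldStyle(Base):
    def __init__(self):
        super(OldStyle, self).__init__("old")   # explicit class/instance arguments

class NewStyle(Base):
    def __init__(self):
        super().__init__("new")                 # Python 3 zero-argument form

assert OldStyle().tag == "old" and NewStyle().tag == "new"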
bitsandbytes.optim.lars/LARS.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<2>:<add> "LARS without momentum is not supported!" <del> f"LARS without momentum is not supported!" <4>:<add> super().__init__( <del> super(LARS, self).__init__(
# module: bitsandbytes.optim.lars class LARS(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): <0> if momentum == 0: <1> raise NotImplementedError( <2> f"LARS without momentum is not supported!" <3> ) <4> super(LARS, self).__init__( <5> "lars", <6> params, <7> lr, <8> (momentum, dampening), <9> 0.0, <10> weight_decay, <11> optim_bits, <12> args, <13> min_8bit_size, <14> percentile_clipping, <15> max_unorm=max_unorm, <16> block_wise=False, <17> ) <18>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 1=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 8=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 14=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / 
CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 17=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 19=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 20=========== <s> bitsandbytes.optim.adamw class AdamW32bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(AdamW32bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 21=========== <s> bitsandbytes.optim.adamw class AdamW8bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(AdamW8bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 22=========== <s>adamw class AdamW(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(AdamW, self).__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, )
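As the LARS record above shows, the bitsandbytes LARS variants refuse to run without momentum. A hedged usage sketch, assuming a CUDA device and a CUDA-enabled bitsandbytes build are available (the linear model is only a placeholder):

import torch
import bitsandbytes as bnb

model = torch.nn.Linear(64, 64).cuda()

# momentum=0 raises NotImplementedError in LARS/LARS8bit/LARS32bit,
# so a nonzero momentum must be passed explicitly.
opt = bnb.optim.LARS(model.parameters(), lr=0.01, momentum=0.9)

loss = model(torch.randn(8, 64, device="cuda")).sum()
loss.backward()
opt.step()
opt.zero_grad()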
bitsandbytes.optim.lars/LARS8bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<2>:<add> "LARS without momentum is not supported!" <del> f"LARS without momentum is not supported!" <4>:<add> super().__init__( <del> super(LARS8bit, self).__init__(
# module: bitsandbytes.optim.lars class LARS8bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): <0> if momentum == 0: <1> raise NotImplementedError( <2> f"LARS without momentum is not supported!" <3> ) <4> super(LARS8bit, self).__init__( <5> "lars", <6> params, <7> lr, <8> (momentum, dampening), <9> 0.0, <10> weight_decay, <11> 8, <12> args, <13> min_8bit_size, <14> percentile_clipping, <15> max_unorm=max_unorm, <16> block_wise=False, <17> ) <18>
===========changed ref 0=========== # module: bitsandbytes.optim.lars class LARS(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 2=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 9=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 15=========== # module: bitsandbytes.cextension - class CUDASetup(object): - 
@classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 18=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 19=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 20=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 21=========== <s> bitsandbytes.optim.adamw class AdamW32bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(AdamW32bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, )
bitsandbytes.optim.lars/LARS32bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<2>:<add> "LARS without momentum is not supported!" <del> f"LARS without momentum is not supported!" <4>:<add> super().__init__( <del> super(LARS32bit, self).__init__(
# module: bitsandbytes.optim.lars class LARS32bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): <0> if momentum == 0: <1> raise NotImplementedError( <2> f"LARS without momentum is not supported!" <3> ) <4> super(LARS32bit, self).__init__( <5> "lars", <6> params, <7> lr, <8> (momentum, dampening), <9> 0.0, <10> weight_decay, <11> 32, <12> args, <13> min_8bit_size, <14> percentile_clipping, <15> max_unorm=max_unorm, <16> block_wise=False, <17> ) <18>
===========changed ref 0=========== # module: bitsandbytes.optim.lars class LARS8bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS8bit, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, 8, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 1=========== # module: bitsandbytes.optim.lars class LARS(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: 
bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 19=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 20=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 21=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """
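Alongside the super() cleanup, these diffs convert str.format() error messages to f-strings and drop stray f-prefixes from plain strings. A tiny standalone sketch of the validation idiom the optimizer constructors above use:

def check_lr(lr: float) -> float:
    # Mirrors the argument checks in the optimizer __init__ methods above.
    if not 0.0 <= lr:
        raise ValueError(f"Invalid learning rate: {lr}")
    return lr

check_lr(1e-3)     # passes
# check_lr(-1.0)   # would raise ValueError("Invalid learning rate: -1.0")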
bitsandbytes.optim.lars/PytorchLARS.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> raise ValueError(f"Invalid learning rate: {lr}") <del> raise ValueError("Invalid learning rate: {}".format(lr)) <3>:<add> raise ValueError(f"Invalid momentum value: {momentum}") <del> raise ValueError("Invalid momentum value: {}".format(momentum)) <6>:<add> f"Invalid weight_decay value: {weight_decay}" <del> "Invalid weight_decay value: {}".format(weight_decay) <21>:<add> super().__init__(params, defaults) <del> super(PytorchLARS, self).__init__(params, defaults)
# module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __init__( self, params, lr=0.01, momentum=0, dampening=0, weight_decay=0, nesterov=False, max_unorm=0.02, ): <0> if lr < 0.0: <1> raise ValueError("Invalid learning rate: {}".format(lr)) <2> if momentum < 0.0: <3> raise ValueError("Invalid momentum value: {}".format(momentum)) <4> if weight_decay < 0.0: <5> raise ValueError( <6> "Invalid weight_decay value: {}".format(weight_decay) <7> ) <8> <9> defaults = dict( <10> lr=lr, <11> momentum=momentum, <12> dampening=dampening, <13> weight_decay=weight_decay, <14> nesterov=nesterov, <15> max_unorm=max_unorm, <16> ) <17> if nesterov and (momentum <= 0 or dampening != 0): <18> raise ValueError( <19> "Nesterov momentum requires a momentum and zero dampening" <20> ) <21> super(PytorchLARS, self).__init__(params, defaults) <22>
===========changed ref 0=========== # module: bitsandbytes.optim.lars class LARS32bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS32bit, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, 32, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 1=========== # module: bitsandbytes.optim.lars class LARS8bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS8bit, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, 8, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 2=========== # module: bitsandbytes.optim.lars class LARS(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in 
paths_list_candidate.split(":") if ld_path} + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 15=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 17=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 19=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 20=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) -
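The cuda_setup.main refs repeated through these records split runtime-library discovery into small set-returning helpers (extract_candidate_paths, get_cuda_runtime_lib_paths, resolve_paths_list). A self-contained sketch of the same idea, independent of the package; remove_non_existent_dirs is approximated here with a simple directory filter:

from pathlib import Path
from typing import Set

CUDA_RUNTIME_LIB = "libcudart.so"

def extract_candidate_paths(paths: str) -> Set[Path]:
    # Split an LD_LIBRARY_PATH-style string into candidate directories.
    return {Path(p) for p in paths.split(":") if p}

def existing_dirs(candidates: Set[Path]) -> Set[Path]:
    # Stand-in for remove_non_existent_dirs in the refs above.
    return {p for p in candidates if p.is_dir()}

def cuda_runtime_libs(candidates: Set[Path]) -> Set[Path]:
    return {p / CUDA_RUNTIME_LIB for p in candidates if (p / CUDA_RUNTIME_LIB).is_file()}

libs = cuda_runtime_libs(existing_dirs(extract_candidate_paths("/usr/lib:/opt/cuda/lib64")))
print(libs or "no libcudart.so found in the candidate directories")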
bitsandbytes.optim.lars/PytorchLARS.__setstate__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__setstate__(state) <del> super(PytorchLARS, self).__setstate__(state)
# module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __setstate__(self, state): <0> super(PytorchLARS, self).__setstate__(state) <1> for group in self.param_groups: <2> group.setdefault("nesterov", False) <3>
===========changed ref 0=========== # module: bitsandbytes.optim.lars class LARS32bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS32bit, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, 32, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 1=========== # module: bitsandbytes.optim.lars class LARS8bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS8bit, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, 8, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 2=========== # module: bitsandbytes.optim.lars class LARS(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 3=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __init__( self, params, lr=0.01, momentum=0, dampening=0, weight_decay=0, nesterov=False, max_unorm=0.02, ): if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if momentum < 0.0: + raise ValueError(f"Invalid momentum value: {momentum}") - raise ValueError("Invalid momentum value: {}".format(momentum)) if weight_decay < 0.0: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, max_unorm=max_unorm, ) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError( "Nesterov momentum requires a momentum and zero dampening" ) + super().__init__(params, defaults) - super(PytorchLARS, self).__init__(params, defaults) ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 9=========== # module: 
bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 10=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 16=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" )
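PytorchLARS.__setstate__ above backfills a missing "nesterov" key when older checkpoints are restored. A short illustration of that setdefault pattern on plain dicts, with no optimizer machinery involved:

# Param groups saved before the 'nesterov' option existed lack the key;
# setdefault adds it without overwriting groups that already carry it.
param_groups = [{"lr": 0.01, "momentum": 0.9}, {"lr": 0.001, "nesterov": True}]
for group in param_groups:
    group.setdefault("nesterov", False)
assert param_groups[0]["nesterov"] is False
assert param_groups[1]["nesterov"] is True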
bitsandbytes.optim.lars/PytorchLARS.step
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<29>:<add> d_p = d_p.add(p, alpha=weight_decay) <del> d_p = d_p.add(param, alpha=weight_decay)
# module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): @torch.no_grad() def step(self, closure=None): <0> """Performs a single optimization step. <1> <2> Args: <3> closure (callable, optional): A closure that reevaluates the model <4> and returns the loss. <5> """ <6> loss = None <7> if closure is not None: <8> with torch.enable_grad(): <9> loss = closure() <10> <11> for group in self.param_groups: <12> params_with_grad = [] <13> d_p_list = [] <14> momentum_buffer_list = [] <15> weight_decay = group["weight_decay"] <16> momentum = group["momentum"] <17> dampening = group["dampening"] <18> nesterov = group["nesterov"] <19> max_unorm = group["max_unorm"] <20> lr = group["lr"] <21> <22> for p in group["params"]: <23> if p.grad is None: <24> continue <25> <26> state = self.state[p] <27> d_p = p.grad <28> if weight_decay != 0: <29> d_p = d_p.add(param, alpha=weight_decay) <30> <31> if momentum != 0: <32> buf = state.get("momentum_buffer", None) <33> <34> if buf is None: <35> buf = torch.clone(d_p).detach() <36> state["momentum_buffer"] = buf <37> else: <38> buf.mul_(momentum).add_(d_p, alpha=1 - dampening) <39> <40> if nesterov: <41> update = d_p + buf * momentum <42> else: <43> update = buf <44> <45> update_scale = 1.0 <46> if max_unorm > 0.0: <47> assert p.dtype == torch.float32 <48> pnorm = torch.norm(p.detach()) <49> unorm</s>
===========below chunk 0=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): @torch.no_grad() def step(self, closure=None): # offset: 1 if unorm > max_unorm * pnorm: update_scale = max_unorm * pnorm / unorm p.add_(update, alpha=-lr * update_scale) return loss ===========changed ref 0=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(PytorchLARS, self).__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) ===========changed ref 1=========== # module: bitsandbytes.optim.lars class LARS32bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS32bit, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, 32, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 2=========== # module: bitsandbytes.optim.lars class LARS8bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" ) + super().__init__( - super(LARS8bit, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, 8, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 3=========== # module: bitsandbytes.optim.lars class LARS(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02, ): if momentum == 0: raise NotImplementedError( + "LARS without momentum is not supported!" - f"LARS without momentum is not supported!" 
) + super().__init__( - super(LARS, self).__init__( "lars", params, lr, (momentum, dampening), 0.0, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False, ) ===========changed ref 4=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __init__( self, params, lr=0.01, momentum=0, dampening=0, weight_decay=0, nesterov=False, max_unorm=0.02, ): if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if momentum < 0.0: + raise ValueError(f"Invalid momentum value: {momentum}") - raise ValueError("Invalid momentum value: {}".format(momentum)) if weight_decay < 0.0: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, max_unorm=max_unorm, ) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError( "Nesterov momentum requires a momentum and zero dampening" ) + super().__init__(params, defaults) - super(PytorchLARS, self).__init__(params, defaults) ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 11=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 15=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden")
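The step record above also shows the LARS trust-ratio clamp: when the update norm exceeds max_unorm times the parameter norm, the update is rescaled before being applied. A minimal numeric sketch of that scaling rule in plain PyTorch, detached from the optimizer class:

import torch

def scaled_update(p: torch.Tensor, update: torch.Tensor, lr: float, max_unorm: float = 0.02):
    # Rescale the update so its norm stays within max_unorm * ||p||,
    # matching the branch in PytorchLARS.step above.
    scale = 1.0
    if max_unorm > 0.0:
        pnorm = torch.norm(p.detach())
        unorm = torch.norm(update)
        if unorm > max_unorm * pnorm:
            scale = (max_unorm * pnorm / unorm).item()
    return p - lr * scale * update

p = torch.ones(4)
u = torch.full((4,), 10.0)          # deliberately oversized update
print(scaled_update(p, u, lr=0.1))  # update shrunk to 2% of ||p||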
bitsandbytes.optim.sgd/SGD.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> raise NotImplementedError("SGD without momentum is not supported!") <del> raise NotImplementedError(f"SGD without momentum is not supported!") <2>:<add> super().__init__( <del> super(SGD, self).__init__(
# module: bitsandbytes.optim.sgd class SGD(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> if momentum == 0: <1> raise NotImplementedError(f"SGD without momentum is not supported!") <2> super(SGD, self).__init__( <3> "momentum", <4> params, <5> lr, <6> (momentum, dampening), <7> 0.0, <8> weight_decay, <9> optim_bits, <10> args, <11> min_8bit_size, <12> percentile_clipping, <13> block_wise, <14> ) <15>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer.Optimizer1State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer1State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s>_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer 
class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } +
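Like LARS, the SGD variants here are momentum-only: passing momentum=0 raises NotImplementedError. A hedged usage sketch with the 8-bit variant, assuming a CUDA device and a CUDA-enabled bitsandbytes build (the tiny model is a placeholder):

import torch
import bitsandbytes as bnb

model = torch.nn.Linear(128, 128).cuda()

try:
    bnb.optim.SGD(model.parameters(), lr=0.01)   # momentum defaults to 0
except NotImplementedError as e:
    print(e)                                     # "SGD without momentum is not supported!"

# SGD, SGD8bit and SGD32bit all require a nonzero momentum.
opt = bnb.optim.SGD8bit(model.parameters(), lr=0.01, momentum=0.9)
model(torch.randn(16, 128, device="cuda")).sum().backward()
opt.step()
opt.zero_grad()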
bitsandbytes.optim.sgd/SGD8bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> raise NotImplementedError("SGD without momentum is not supported!") <del> raise NotImplementedError(f"SGD without momentum is not supported!") <2>:<add> super().__init__( <del> super(SGD8bit, self).__init__(
# module: bitsandbytes.optim.sgd class SGD8bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> if momentum == 0: <1> raise NotImplementedError(f"SGD without momentum is not supported!") <2> super(SGD8bit, self).__init__( <3> "momentum", <4> params, <5> lr, <6> (momentum, dampening), <7> 0.0, <8> weight_decay, <9> 8, <10> args, <11> min_8bit_size, <12> percentile_clipping, <13> block_wise, <14> ) <15>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer.Optimizer1State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer1State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s>_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== # module: bitsandbytes.optim.sgd class SGD(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): if momentum == 0: + raise NotImplementedError("SGD without momentum is not supported!") - raise NotImplementedError(f"SGD without momentum is not supported!") + super().__init__( - super(SGD, self).__init__( "momentum", params, lr, (momentum, dampening), 0.0, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # 
module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 15=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance +
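For orientation, a minimal sketch of how the SGD8bit constructor recorded above might be called; the momentum == 0 guard at the top of the body means momentum must be nonzero. The model and hyperparameter values are illustrative assumptions, not part of the recorded change.

import torch
from bitsandbytes.optim.sgd import SGD8bit

model = torch.nn.Linear(4096, 4096)  # placeholder module

# momentum=0 would raise NotImplementedError("SGD without momentum is not supported!")
optimizer = SGD8bit(model.parameters(), lr=0.01, momentum=0.9)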
bitsandbytes.optim.sgd/SGD32bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<1>:<add> raise NotImplementedError("SGD without momentum is not supported!") <del> raise NotImplementedError(f"SGD without momentum is not supported!") <2>:<add> super().__init__( <del> super(SGD32bit, self).__init__(
# module: bitsandbytes.optim.sgd class SGD32bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> if momentum == 0: <1> raise NotImplementedError(f"SGD without momentum is not supported!") <2> super(SGD32bit, self).__init__( <3> "momentum", <4> params, <5> lr, <6> (momentum, dampening), <7> 0.0, <8> weight_decay, <9> 32, <10> args, <11> min_8bit_size, <12> percentile_clipping, <13> block_wise, <14> ) <15>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer.Optimizer1State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer1State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s>_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== # module: bitsandbytes.optim.sgd class SGD8bit(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): if momentum == 0: + raise NotImplementedError("SGD without momentum is not supported!") - raise NotImplementedError(f"SGD without momentum is not supported!") + super().__init__( - super(SGD8bit, self).__init__( "momentum", params, lr, (momentum, dampening), 0.0, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== # module: bitsandbytes.optim.sgd class SGD(Optimizer1State): def __init__( self, params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): if momentum == 0: + raise NotImplementedError("SGD without momentum is not supported!") - raise NotImplementedError(f"SGD without momentum is not supported!") + super().__init__( - super(SGD, self).__init__( "momentum", params, lr, (momentum, dampening), 0.0, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + 
===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 10=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden")
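The edit recorded in the hints above, super(SGD32bit, self).__init__(...) becoming super().__init__(...), is the Python 3 zero-argument form of super(); the same change recurs in every record that follows. A generic sketch of the pattern, with illustrative class names:

class Base:
    def __init__(self, value):
        self.value = value

class Child(Base):
    def __init__(self, value):
        # legacy two-argument form, as removed across these optimizers:
        #   super(Child, self).__init__(value)
        # zero-argument form, as added; resolves Base the same way inside the class body:
        super().__init__(value)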
bitsandbytes.optim.rmsprop/RMSprop.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<2>:<add> "RMSprop with alpha==0.0 is not supported!" <del> f"RMSprop with alpha==0.0 is not supported!" <5>:<add> raise NotImplementedError("Centered RMSprop is not supported!") <del> raise NotImplementedError(f"Centered RMSprop is not supported!") <6>:<add> super().__init__( <del> super(RMSprop, self).__init__(
<s>bytes.optim.rmsprop class RMSprop(Optimizer1State): def __init__( self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> if alpha == 0: <1> raise NotImplementedError( <2> f"RMSprop with alpha==0.0 is not supported!" <3> ) <4> if centered: <5> raise NotImplementedError(f"Centered RMSprop is not supported!") <6> super(RMSprop, self).__init__( <7> "rmsprop", <8> params, <9> lr, <10> (alpha, momentum), <11> eps, <12> weight_decay, <13> optim_bits, <14> args, <15> min_8bit_size, <16> percentile_clipping, <17> block_wise, <18> ) <19>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer.Optimizer1State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer1State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s>_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer 
class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } +
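Given the guards in the RMSprop constructor above, alpha must be nonzero and centered must remain False. A minimal instantiation sketch; the model is an illustrative placeholder:

import torch
from bitsandbytes.optim.rmsprop import RMSprop

model = torch.nn.Linear(512, 512)  # placeholder module

# alpha=0 or centered=True would raise NotImplementedError, per the checks above.
optimizer = RMSprop(model.parameters(), lr=1e-2, alpha=0.99)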
bitsandbytes.optim.rmsprop/RMSprop8bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<2>:<add> "RMSprop with alpha==0.0 is not supported!" <del> f"RMSprop with alpha==0.0 is not supported!" <5>:<add> raise NotImplementedError("Centered RMSprop is not supported!") <del> raise NotImplementedError(f"Centered RMSprop is not supported!") <6>:<add> super().__init__( <del> super(RMSprop8bit, self).__init__(
# module: bitsandbytes.optim.rmsprop class RMSprop8bit(Optimizer1State): def __init__( self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> if alpha == 0: <1> raise NotImplementedError( <2> f"RMSprop with alpha==0.0 is not supported!" <3> ) <4> if centered: <5> raise NotImplementedError(f"Centered RMSprop is not supported!") <6> super(RMSprop8bit, self).__init__( <7> "rmsprop", <8> params, <9> lr, <10> (alpha, momentum), <11> eps, <12> weight_decay, <13> 8, <14> args, <15> min_8bit_size, <16> percentile_clipping, <17> block_wise, <18> ) <19>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer.Optimizer1State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer1State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s>_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== <s>bytes.optim.rmsprop class RMSprop(Optimizer1State): def __init__( self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): if alpha == 0: raise NotImplementedError( + "RMSprop with alpha==0.0 is not supported!" - f"RMSprop with alpha==0.0 is not supported!" 
) if centered: + raise NotImplementedError("Centered RMSprop is not supported!") - raise NotImplementedError(f"Centered RMSprop is not supported!") + super().__init__( - super(RMSprop, self).__init__( "rmsprop", params, lr, (alpha, momentum), eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 15=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" )
bitsandbytes.optim.rmsprop/RMSprop32bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<2>:<add> "RMSprop with alpha==0.0 is not supported!" <del> f"RMSprop with alpha==0.0 is not supported!" <5>:<add> raise NotImplementedError("Centered RMSprop is not supported!") <del> raise NotImplementedError(f"Centered RMSprop is not supported!") <6>:<add> super().__init__( <del> super(RMSprop32bit, self).__init__(
# module: bitsandbytes.optim.rmsprop class RMSprop32bit(Optimizer1State): def __init__( self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> if alpha == 0: <1> raise NotImplementedError( <2> f"RMSprop with alpha==0.0 is not supported!" <3> ) <4> if centered: <5> raise NotImplementedError(f"Centered RMSprop is not supported!") <6> super(RMSprop32bit, self).__init__( <7> "rmsprop", <8> params, <9> lr, <10> (alpha, momentum), <11> eps, <12> weight_decay, <13> 32, <14> args, <15> min_8bit_size, <16> percentile_clipping, <17> block_wise, <18> ) <19>
===========unchanged ref 0=========== at: bitsandbytes.optim.optimizer.Optimizer1State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer1State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s>_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== # module: bitsandbytes.optim.rmsprop class RMSprop8bit(Optimizer1State): def __init__( self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): if alpha == 0: raise NotImplementedError( + "RMSprop with alpha==0.0 is not supported!" - f"RMSprop with alpha==0.0 is not supported!" ) if centered: + raise NotImplementedError("Centered RMSprop is not supported!") - raise NotImplementedError(f"Centered RMSprop is not supported!") + super().__init__( - super(RMSprop8bit, self).__init__( "rmsprop", params, lr, (alpha, momentum), eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== <s>bytes.optim.rmsprop class RMSprop(Optimizer1State): def __init__( self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): if alpha == 0: raise NotImplementedError( + "RMSprop with alpha==0.0 is not supported!" - f"RMSprop with alpha==0.0 is not supported!" 
) if centered: + raise NotImplementedError("Centered RMSprop is not supported!") - raise NotImplementedError(f"Centered RMSprop is not supported!") + super().__init__( - super(RMSprop, self).__init__( "rmsprop", params, lr, (alpha, momentum), eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 10=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state)
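The other recurring edit in the RMSprop records drops the f-prefix from string literals that contain no placeholders; the two forms are identical at runtime, as this one-liner checks:

# an f-string with nothing to interpolate equals the plain literal
assert f"Centered RMSprop is not supported!" == "Centered RMSprop is not supported!"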
bitsandbytes.optim.adam/Adam.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(Adam, self).__init__(
<s>andbytes.optim.adam class Adam(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> super(Adam, self).__init__( <1> "adam", <2> params, <3> lr, <4> betas, <5> eps, <6> weight_decay, <7> optim_bits, <8> args, <9> min_8bit_size, <10> percentile_clipping, <11> block_wise, <12> ) <13>
===========unchanged ref 0=========== at: bitsandbytes.optim.adam Adam(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True) at: bitsandbytes.optim.optimizer.Optimizer2State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if isinstance(betas, str): # format: '(beta1, beta2)' betas = betas.replace("(", "").replace(")", "").strip().split(",") betas = [float(b) for b in betas] for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer2State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s> args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDASetup(object): 
- def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance -
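In the Adam record above, optim_bits selects the precision of the optimizer state; Adam8bit and Adam32bit (recorded next) are thin presets that pin it to 8 and 32. A minimal usage sketch with an illustrative model:

import torch
from bitsandbytes.optim.adam import Adam, Adam8bit

model = torch.nn.Linear(4096, 4096)  # placeholder module

adam32 = Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), optim_bits=32)
adam8 = Adam8bit(model.parameters(), lr=1e-3)  # equivalent to Adam(..., optim_bits=8)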
bitsandbytes.optim.adam/Adam8bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(Adam8bit, self).__init__(
# module: bitsandbytes.optim.adam class Adam8bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> super(Adam8bit, self).__init__( <1> "adam", <2> params, <3> lr, <4> betas, <5> eps, <6> weight_decay, <7> 8, <8> args, <9> min_8bit_size, <10> percentile_clipping, <11> block_wise, <12> ) <13>
===========unchanged ref 0=========== at: bitsandbytes.optim.adam Adam8bit(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True) at: bitsandbytes.optim.optimizer.Optimizer2State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if isinstance(betas, str): # format: '(beta1, beta2)' betas = betas.replace("(", "").replace(")", "").strip().split(",") betas = [float(b) for b in betas] for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer2State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s> args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== <s>andbytes.optim.adam class Adam(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam, self).__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call 
get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 15=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" )
bitsandbytes.optim.adam/Adam32bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(Adam32bit, self).__init__(
# module: bitsandbytes.optim.adam class Adam32bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): <0> super(Adam32bit, self).__init__( <1> "adam", <2> params, <3> lr, <4> betas, <5> eps, <6> weight_decay, <7> 32, <8> args, <9> min_8bit_size, <10> percentile_clipping, <11> block_wise, <12> ) <13>
===========unchanged ref 0=========== at: bitsandbytes.optim.adam Adam32bit(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True) at: bitsandbytes.optim.optimizer.Optimizer2State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if isinstance(betas, str): # format: '(beta1, beta2)' betas = betas.replace("(", "").replace(")", "").strip().split(",") betas = [float(b) for b in betas] for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer2State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s> args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== # module: bitsandbytes.optim.adam class Adam8bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam8bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== <s>andbytes.optim.adam class Adam(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam, self).__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + 
===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 10=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} +
bitsandbytes.optim.adam/AnalysisAdam.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<7>:<add> super().__init__(params, defaults) <del> super(AnalysisAdam, self).__init__(params, defaults)
# module: bitsandbytes.optim.adam class AnalysisAdam(torch.optim.Optimizer): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, bnb_analysis="dynamic-blockwise", savedir=None, ): <0> defaults = dict( <1> lr=lr, <2> betas=betas, <3> eps=eps, <4> weight_decay=weight_decay, <5> amsgrad=amsgrad, <6> ) <7> super(AnalysisAdam, self).__init__(params, defaults) <8> self.analysis = bnb_analysis <9> self.savedir = savedir <10>
===========unchanged ref 0=========== at: bitsandbytes.optim.adam AnalysisAdam(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, bnb_analysis="dynamic-blockwise", savedir=None) at: torch.optim.optimizer.Optimizer OptimizerPreHook: TypeAlias = Callable[[Self, Args, Kwargs], Optional[Tuple[Args, Kwargs]]] # type: ignore[misc] OptimizerPostHook: TypeAlias = Callable[[Self, Args, Kwargs], None] # type: ignore[misc] _optimizer_step_pre_hooks: Dict[int, OptimizerPreHook] _optimizer_step_post_hooks: Dict[int, OptimizerPostHook] _optimizer_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]' _optimizer_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]' _optimizer_load_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]' _optimizer_load_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]' __init__(self, params: params_t, defaults: Dict[str, Any]) -> None __init__(params: params_t, defaults: Dict[str, Any]) -> None ===========changed ref 0=========== # module: bitsandbytes.optim.adam class Adam32bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam32bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 1=========== # module: bitsandbytes.optim.adam class Adam8bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam8bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 2=========== <s>andbytes.optim.adam class Adam(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam, self).__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main 
+ class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 15=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 17=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) +
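AnalysisAdam above subclasses torch.optim.Optimizer directly and adds bnb_analysis and savedir on top of the usual Adam arguments. A construction sketch; the save directory is a hypothetical path:

import torch
from bitsandbytes.optim.adam import AnalysisAdam

model = torch.nn.Linear(1024, 1024)  # placeholder module

optimizer = AnalysisAdam(
    model.parameters(),
    lr=1e-3,
    bnb_analysis="dynamic-blockwise",  # default shown in the signature above
    savedir="./analysis",              # hypothetical output directory
)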
bitsandbytes.optim.lamb/LAMB.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(LAMB, self).__init__(
<s>, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, adam_w_mode=True, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0, ): <0> super(LAMB, self).__init__( <1> "lamb", <2> params, <3> lr, <4> betas, <5> eps, <6> weight_decay, <7> optim_bits, <8> args, <9> min_8bit_size, <10> percentile_clipping, <11> block_wise, <12> max_unorm=1.0, <13> ) <14>
===========unchanged ref 0=========== at: bitsandbytes.optim.lamb LAMB(params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, adam_w_mode=True, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0) at: bitsandbytes.optim.optimizer.Optimizer2State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if isinstance(betas, str): # format: '(beta1, beta2)' betas = betas.replace("(", "").replace(")", "").strip().split(",") betas = [float(b) for b in betas] for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer2State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s> args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # 
module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance -
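The LAMB hunk above, like the LAMB8bit and LAMB32bit hunks that follow, only swaps the Python 2-style two-argument super call for the zero-argument form. A minimal generic sketch of the difference (the classes here are illustrative stand-ins, not the bitsandbytes optimizers):

class Base:
    def __init__(self, name, lr):
        self.name = name
        self.lr = lr

class Child(Base):
    def __init__(self, lr=1e-3):
        # Identical in effect to super(Child, self).__init__(...), but it does
        # not repeat the class name, so it survives renames and copy-paste.
        super().__init__("child", lr)

c = Child()
assert (c.name, c.lr) == ("child", 1e-3)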
bitsandbytes.optim.lamb/LAMB8bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(LAMB8bit, self).__init__(
<s>( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, adam_w_mode=True, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0, ): <0> super(LAMB8bit, self).__init__( <1> "lamb", <2> params, <3> lr, <4> betas, <5> eps, <6> weight_decay, <7> 8, <8> args, <9> min_8bit_size, <10> percentile_clipping, <11> block_wise, <12> max_unorm=1.0, <13> ) <14>
===========unchanged ref 0=========== at: bitsandbytes.optim.lamb LAMB8bit(params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, adam_w_mode=True, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0) at: bitsandbytes.optim.optimizer.Optimizer2State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if isinstance(betas, str): # format: '(beta1, beta2)' betas = betas.replace("(", "").replace(")", "").strip().split(",") betas = [float(b) for b in betas] for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer2State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s> args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== <s>, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, adam_w_mode=True, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0, ): + super().__init__( - super(LAMB, self).__init__( "lamb", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, max_unorm=1.0, ) ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - 
def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) +
bitsandbytes.optim.lamb/LAMB32bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(LAMB32bit, self).__init__(
<s>( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, adam_w_mode=True, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0, ): <0> super(LAMB32bit, self).__init__( <1> "lamb", <2> params, <3> lr, <4> betas, <5> eps, <6> weight_decay, <7> 32, <8> args, <9> min_8bit_size, <10> percentile_clipping, <11> block_wise, <12> max_unorm=1.0, <13> ) <14>
===========unchanged ref 0=========== at: bitsandbytes.optim.lamb LAMB32bit(params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, adam_w_mode=True, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0) at: bitsandbytes.optim.optimizer.Optimizer2State __init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False) ===========changed ref 0=========== <s> __init__( self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") - raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") - raise ValueError("Invalid epsilon value: {}".format(eps)) if isinstance(betas, str): # format: '(beta1, beta2)' betas = betas.replace("(", "").replace(")", "").strip().split(",") betas = [float(b) for b in betas] for i in range(len(betas)): if not 0.0 <= betas[i] < 1.0: raise ValueError( f"Invalid beta parameter at index {i}: {betas[i]}" ) if not 0.0 <= weight_decay: raise ValueError( + f"Invalid weight_decay value: {weight_decay}" - "Invalid weight_decay value: {}".format(weight_decay) ) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits) - super(Optimizer2State, self).__init__(params, defaults, optim_bits) if args is None: args = {} args["optim_bits"] = optim_bits args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args</s> ===========changed ref 1=========== <s> self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False, ): # offset: 1 <s> args["percentile_clipping"] = 100 args["min_8bit_size"] = min_8bit_size args["percentile_clipping"] = percentile_clipping args["block_wise"] = block_wise args["max_unorm"] = max_unorm args["skip_zeros"] = skip_zeros self.args = MockArgs(args) else: self.args = args self.optimizer_name = optimizer_name ===========changed ref 2=========== <s>( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, adam_w_mode=True, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0, ): + super().__init__( - super(LAMB8bit, self).__init__( "lamb", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, max_unorm=1.0, ) ===========changed ref 3=========== <s>, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, adam_w_mode=True, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=False, max_unorm=1.0, ): + super().__init__( - super(LAMB, self).__init__( "lamb", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, max_unorm=1.0, ) ===========changed ref 4=========== # module: 
bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 10=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state)
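The changed refs above also show the CUDASetup singleton moving into bitsandbytes.cuda_setup.main. The following is a self-contained sketch of that get_instance() pattern, reconstructed from the fragments shown; the real class carries more state, and the log list is initialized here only so the sketch runs on its own:

from warnings import warn

class Setup:
    _instance = None

    def __init__(self):
        # Direct construction is forbidden; callers go through get_instance().
        raise RuntimeError("Call get_instance() instead")

    def initialize(self):
        self.has_printed = False
        self.lib = None
        self.cuda_setup_log = []

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls.__new__(cls)  # bypasses __init__ on purpose
            cls._instance.initialize()
        return cls._instance

    def add_log_entry(self, msg, is_warning=False):
        self.cuda_setup_log.append((msg, is_warning))

    def print_log_stack(self):
        for msg, is_warning in self.cuda_setup_log:
            if is_warning:
                warn(msg)
            else:
                print(msg)

setup = Setup.get_instance()
setup.add_log_entry("probing LD_LIBRARY_PATH for libcudart.so")
assert setup is Setup.get_instance()  # every caller sees the same instance
setup.print_log_stack()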
bitsandbytes.nn.modules/StableEmbedding.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(StableEmbedding, self).__init__(
<s>torch.nn.Embedding): def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, _weight: Optional[Tensor] = None, device=None, dtype=None, ) -> None: <0> super(StableEmbedding, self).__init__( <1> num_embeddings, <2> embedding_dim, <3> padding_idx, <4> max_norm, <5> norm_type, <6> scale_grad_by_freq, <7> sparse, <8> _weight, <9> device, <10> dtype, <11> ) <12> self.norm = torch.nn.LayerNorm(embedding_dim, device=device) <13> GlobalOptimManager.get_instance().register_module_override( <14> self, "weight", {"optim_bits": 32} <15> ) <16>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 1=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 8=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 14=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / 
CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 17=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 18=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(PytorchLARS, self).__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) ===========changed ref 19=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 20=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 21=========== # module: bitsandbytes.optim.adam class Adam32bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam32bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 22=========== # module: bitsandbytes.optim.adam class Adam8bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam8bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, )
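A short usage sketch for the StableEmbedding layer above. It assumes a working bitsandbytes install with torch and that the forward pass applies the LayerNorm created in the constructor (only __init__ is shown here):

import torch
import bitsandbytes as bnb

emb = bnb.nn.StableEmbedding(num_embeddings=1000, embedding_dim=64)
tokens = torch.randint(0, 1000, (8, 16))
out = emb(tokens)
print(out.shape)  # torch.Size([8, 16, 64])
# The register_module_override call in __init__ keeps this weight's optimizer
# state in 32 bits even when the rest of the model uses an 8-bit optimizer.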
bitsandbytes.nn.modules/Embedding.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(Embedding, self).__init__(
<s> bitsandbytes.nn.modules class Embedding(torch.nn.Embedding): def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, _weight: Optional[Tensor] = None, ) -> None: <0> super(Embedding, self).__init__( <1> num_embeddings, <2> embedding_dim, <3> padding_idx, <4> max_norm, <5> norm_type, <6> scale_grad_by_freq, <7> sparse, <8> _weight, <9> ) <10> GlobalOptimManager.get_instance().register_module_override( <11> self, "weight", {"optim_bits": 32} <12> ) <13>
===========changed ref 0=========== <s>torch.nn.Embedding): def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, _weight: Optional[Tensor] = None, device=None, dtype=None, ) -> None: + super().__init__( - super(StableEmbedding, self).__init__( num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, _weight, device, dtype, ) self.norm = torch.nn.LayerNorm(embedding_dim, device=device) GlobalOptimManager.get_instance().register_module_override( self, "weight", {"optim_bits": 32} ) ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 2=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 9=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 15=========== # module: 
bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 18=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 19=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(PytorchLARS, self).__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) ===========changed ref 20=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 21=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 22=========== # module: bitsandbytes.optim.adam class Adam32bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam32bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, )
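The same register_module_override mechanism can be used directly on a plain torch module. A hedged sketch, assuming GlobalOptimManager is exported under bnb.optim and that the override is registered before the optimizer takes its first step:

import torch
import bitsandbytes as bnb

model = torch.nn.Sequential(
    torch.nn.Embedding(1000, 64),
    torch.nn.Linear(64, 10),
)
# Pin the embedding weight to 32-bit optimizer state, mirroring what the
# Embedding class above does for itself in __init__.
bnb.optim.GlobalOptimManager.get_instance().register_module_override(
    model[0], "weight", {"optim_bits": 32}
)
optimizer = bnb.optim.Adam8bit(model.parameters(), lr=1e-3)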
bitsandbytes.nn.modules/Linear8bitLt.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__( <del> super(Linear8bitLt, self).__init__(
# module: bitsandbytes.nn.modules class Linear8bitLt(nn.Linear): def __init__( self, input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None, ): <0> super(Linear8bitLt, self).__init__( <1> input_features, output_features, bias <2> ) <3> self.state = bnb.MatmulLtState() <4> self.index = index <5> <6> self.state.threshold = threshold <7> self.state.has_fp16_weights = has_fp16_weights <8> self.state.memory_efficient_backward = memory_efficient_backward <9> if threshold > 0.0 and not has_fp16_weights: <10> self.state.use_pool = True <11> <12> self.weight = Int8Params( <13> self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights <14> ) <15>
===========changed ref 0=========== <s> bitsandbytes.nn.modules class Embedding(torch.nn.Embedding): def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, _weight: Optional[Tensor] = None, ) -> None: + super().__init__( - super(Embedding, self).__init__( num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, _weight, ) GlobalOptimManager.get_instance().register_module_override( self, "weight", {"optim_bits": 32} ) ===========changed ref 1=========== <s>torch.nn.Embedding): def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, _weight: Optional[Tensor] = None, device=None, dtype=None, ) -> None: + super().__init__( - super(StableEmbedding, self).__init__( num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, _weight, device, dtype, ) self.norm = torch.nn.LayerNorm(embedding_dim, device=device) GlobalOptimManager.get_instance().register_module_override( self, "weight", {"optim_bits": 32} ) ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + 
resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 19=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 20=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(PytorchLARS, self).__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) ===========changed ref 21=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 22=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """
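One plausible workflow for the Linear8bitLt layer above (a sketch, not the library's documented recipe): build the int8 layer with has_fp16_weights=False, copy fp16 weights in, and move it to the GPU, at which point Int8Params quantizes the weight. A CUDA device is assumed; bias is omitted to keep the sketch focused on the weight path.

import torch
import bitsandbytes as bnb

fp16_linear = torch.nn.Linear(1024, 4096, bias=False).half()
int8_linear = bnb.nn.Linear8bitLt(
    1024, 4096, bias=False, has_fp16_weights=False, threshold=6.0
)
int8_linear.load_state_dict(fp16_linear.state_dict())
int8_linear = int8_linear.cuda()  # weight is quantized to int8 on transfer
x = torch.randn(8, 1024, dtype=torch.float16, device="cuda")
y = int8_linear(x)
print(y.shape)  # torch.Size([8, 4096])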
tests.test_functional/FFN.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__() <del> super(FFN, self).__init__()
# module: tests.test_functional class FFN(torch.nn.Module): def __init__(self, input_features, hidden_size, bias=True): <0> super(FFN, self).__init__() <1> self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias) <2> self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias) <3> <4> with torch.no_grad(): <5> torch.nn.init.xavier_uniform_(self.fc1.weight) <6> torch.nn.init.xavier_uniform_(self.fc2.weight) <7>
===========changed ref 0=========== # module: tests.test_functional torch.set_printoptions( + precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000 - precision=4, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000 ) k = 20 ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 2=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 9=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 15=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + 
===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 18=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 19=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(PytorchLARS, self).__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) ===========changed ref 20=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 21=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 22=========== # module: bitsandbytes.optim.adam class Adam32bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam32bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 23=========== # module: bitsandbytes.optim.adam class Adam8bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam8bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, )
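A tiny sketch of exercising the FFN test helper above; only __init__ is shown in this record, so the sub-layers are called directly rather than assuming a particular forward():

import torch

ffn = FFN(input_features=32, hidden_size=128, bias=True)  # class defined above
x = torch.randn(4, 32)
out = ffn.fc2(torch.relu(ffn.fc1(x)))
print(out.shape)  # torch.Size([4, 32])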
tests.test_functional/Timer.tock
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<12>:<add> print(f"{name} took: {self.agg[name] / 1000.0:.5f}s") <del> print("{0} took: {1:.5f}s".format(name, self.agg[name] / 1000.0))
# module: tests.test_functional + class Timer: - class Timer(object): def tock(self, name="default", evict=True, print_ms=True): <0> if name in self.ends: <1> self.ends[name].record() <2> torch.cuda.synchronize() <3> ms = self.starts[name].elapsed_time(self.ends[name]) <4> if name not in self.agg: <5> self.agg[name] = 0.0 <6> self.agg[name] += ms <7> if evict: <8> self.starts.pop(name) <9> self.ends.pop(name) <10> <11> if print_ms and name in self.agg: <12> print("{0} took: {1:.5f}s".format(name, self.agg[name] / 1000.0)) <13> <14> return self.agg[name] <15>
===========changed ref 0=========== # module: tests.test_functional torch.set_printoptions( + precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000 - precision=4, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000 ) k = 20 ===========changed ref 1=========== # module: tests.test_functional class FFN(torch.nn.Module): def __init__(self, input_features, hidden_size, bias=True): + super().__init__() - super(FFN, self).__init__() self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias) self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias) with torch.no_grad(): torch.nn.init.xavier_uniform_(self.fc1.weight) torch.nn.init.xavier_uniform_(self.fc2.weight) ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed 
ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 19=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 20=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(PytorchLARS, self).__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) ===========changed ref 21=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 22=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 23=========== # module: bitsandbytes.optim.adam class Adam32bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam32bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, )
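A hedged usage sketch of the event-based timer above; tick() is assumed to record a matching CUDA start event under the same name (only tock() is shown in this record), and a CUDA device is required:

import torch

t = Timer()  # the test-suite class shown above
t.tick("matmul")
a = torch.randn(1024, 1024, device="cuda")
b = torch.randn(1024, 1024, device="cuda")
c = a @ b
t.tock("matmul")  # synchronizes and prints "matmul took: ...s"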
tests.test_functional/test_dynamic_blockwise_quantization
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> #print('') <add> for blocksize in [4096, 2048, 1024, 512]: <add> diffs = [] <del> diffs = [] <1>:<add> reldiffs = [] <del> reldiffs = [] <2>:<add> for i in range(100): <del> for i in range(100): <3>:<add> A1 = torch.randn(1024, 1024, device="cuda") <del> A1 = torch.randn(1024, 1024, device="cuda") <4>:<add> C, S = F.quantize_blockwise(A1, blocksize=blocksize) <del> C, S = F.quantize_blockwise(A1) <5>:<add> A2 = F.dequantize_blockwise(C, S, blocksize=blocksize) <del> A2 = F.dequantize_blockwise(C, S) <6>:<add> diff = torch.abs(A1 - A2) <del> diff = torch.abs(A1 - A2) <7>:<add> reldiff = diff / torch.abs(A1 + 1e-8) <del> reldiff = diff / torch.abs(A1 + 1e-8) <8>:<add> diffs.append(diff.mean().item()) <del> diffs.append(diff.mean().item()) <9>:<add> reldiffs.append(reldiff.mean().item()) <del> reldiffs.append(reldiff.mean().item()) <10>:<add> abserr = sum(diffs)/len(diffs) <add> relerr = sum(reldiffs)/len(reldiffs) <add> assert abserr < 0.011 <del> assert diffs[-1] < 0.011 <11>:<add>
# module: tests.test_functional def test_dynamic_blockwise_quantization(): <0> diffs = [] <1> reldiffs = [] <2> for i in range(100): <3> A1 = torch.randn(1024, 1024, device="cuda") <4> C, S = F.quantize_blockwise(A1) <5> A2 = F.dequantize_blockwise(C, S) <6> diff = torch.abs(A1 - A2) <7> reldiff = diff / torch.abs(A1 + 1e-8) <8> diffs.append(diff.mean().item()) <9> reldiffs.append(reldiff.mean().item()) <10> assert diffs[-1] < 0.011 <11> # print(sum(diffs)/len(diffs)) <12> # print(sum(reldiffs)/len(reldiffs)) <13> <14> diffs = [] <15> for i in range(100): <16> A1 = torch.rand(1024, 1024, device="cuda") <17> C, S = F.quantize_blockwise(A1) <18> A2 = F.dequantize_blockwise(C, S) <19> diff = torch.abs(A1 - A2).mean().item() <20> assert diff < 0.0033 <21> diffs.append(diff) <22> torch.testing.assert_allclose(A1, A2, atol=1e-2, rtol=0) <23>
===========changed ref 0=========== # module: tests.test_functional torch.set_printoptions( + precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000 - precision=4, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000 ) k = 20 ===========changed ref 1=========== # module: tests.test_functional class FFN(torch.nn.Module): def __init__(self, input_features, hidden_size, bias=True): + super().__init__() - super(FFN, self).__init__() self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias) self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias) with torch.no_grad(): torch.nn.init.xavier_uniform_(self.fc1.weight) torch.nn.init.xavier_uniform_(self.fc2.weight) ===========changed ref 2=========== # module: tests.test_functional + class Timer: - class Timer(object): def tock(self, name="default", evict=True, print_ms=True): if name in self.ends: self.ends[name].record() torch.cuda.synchronize() ms = self.starts[name].elapsed_time(self.ends[name]) if name not in self.agg: self.agg[name] = 0.0 self.agg[name] += ms if evict: self.starts.pop(name) self.ends.pop(name) if print_ms and name in self.agg: + print(f"{name} took: {self.agg[name] / 1000.0:.5f}s") - print("{0} took: {1:.5f}s".format(name, self.agg[name] / 1000.0)) return self.agg[name] ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 6=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 9=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + 
resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 15=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 17=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 19=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 20=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 21=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(PytorchLARS, self).__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) ===========changed ref 22=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 23=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """
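A round-trip sketch of the API exercised by the test above, on a CUDA device; blocksize=2048 is one of the sizes the updated test sweeps:

import torch
import bitsandbytes.functional as F

A = torch.randn(1024, 1024, device="cuda")
C, state = F.quantize_blockwise(A, blocksize=2048)        # quantized bytes + per-block state
A_hat = F.dequantize_blockwise(C, state, blocksize=2048)  # reconstruct
err = (A - A_hat).abs().mean().item()
print(err)  # the test asserts the mean absolute error stays below ~0.011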
tests.test_functional/test_fp8_quant
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<16>:<add> #print(sum(abserr)/len(abserr)) <del> print(sum(abserr)/len(abserr)) <17>:<add> #print(sum(relerr)/len(relerr)) <del> print(sum(relerr)/len(relerr)) <30>:<add> #print(sum(abserr)/len(abserr)) <del> print(sum(abserr)/len(abserr)) <31>:<add> #print(sum(relerr)/len(relerr)) <del> print(sum(relerr)/len(relerr
# module: tests.test_functional # print(sum(diffs)/len(diffs)) # print(sum(reldiffs)/len(reldiffs)) def test_fp8_quant(): <0> for e_bits in range(1, 7): <1> p_bits = 7-e_bits <2> code = F.create_fp8_map(True, e_bits, p_bits).cuda() <3> <4> print(e_bits, p_bits) <5> abserr = [] <6> relerr = [] <7> for i in range(100): <8> A1 = torch.randn(1024, 1024, device="cuda") <9> C, SC = F.quantize_blockwise(A1, code=code) <10> A2 = F.dequantize_blockwise(C, SC) <11> diff = torch.abs(A1 - A2) <12> reldiff = diff/torch.abs(A1+1e-8) <13> abserr.append(diff.mean().item()) <14> relerr.append(reldiff.mean().item()) <15> #assert diff < 0.0075 <16> print(sum(abserr)/len(abserr)) <17> print(sum(relerr)/len(relerr)) <18> <19> abserr = [] <20> relerr = [] <21> for i in range(100): <22> A1 = torch.rand(1024, 1024, device="cuda") <23> C, SC = F.quantize_blockwise(A1, code=code) <24> A2 = F.dequantize_blockwise(C, SC) <25> diff = torch.abs(A1 - A2) <26> reldiff = diff/torch.abs(A1+1e-8) <27> abserr.append(diff.mean().item()) <28> relerr.append(reldiff.mean().item()) <29> #assert diff < 0.0075 <30> print(sum(abserr)/len(abserr)) <31> print(sum(relerr)/len(relerr</s>
===========below chunk 0=========== # module: tests.test_functional # print(sum(diffs)/len(diffs)) # print(sum(reldiffs)/len(reldiffs)) def test_fp8_quant(): # offset: 1 abserr = [] relerr = [] for i in range(100): A1 = torch.randn(1024, 1024, device="cuda") C, SC = F.quantize_blockwise(A1) A2 = F.dequantize_blockwise(C, SC) diff = torch.abs(A1 - A2) reldiff = diff/torch.abs(A1+1e-8) abserr.append(diff.mean().item()) relerr.append(reldiff.mean().item()) #assert diff < 0.0075 print(3, sum(abserr)/len(abserr)) print(3, sum(relerr)/len(relerr)) ===========changed ref 0=========== # module: tests.test_functional batch_size = 1 seqdim = 1 values = [] values.append((batch_size, seqdim, 768, 4 * 768)) # values.append((batch_size, seqdim, 1024, 4*1024)) # values.append((batch_size, seqdim, 1536, 4*1536)) # values.append((batch_size, seqdim, 2048, 4*2048)) # values.append((batch_size, seqdim, 2560, 4*2560)) # values.append((batch_size, seqdim, 4096, 4*4096)) # values.append((batch_size, seqdim, 5140, 4*5140)) #values.append((batch_size, seqdim, 12288, 4*12288)) names = [ + "batch_{}_seq_{}_model_{}_hidden_{}".format(*vals) for vals in values - "batch_{0}_seq_{1}_model_{2}_hidden_{3}".format(*vals) for vals in values ] + #print((time.time()-t0)/1e6) + ===========changed ref 1=========== # module: tests.test_functional n = 2 # dim1 = torch.randint(1,1*1024, size=(n,)).tolist() # dim2 = torch.randint(1,4*1024, size=(n,)).tolist() dim1 = [1 * 2048] # dim2 = [12288] dim2 = [2048] # dim1 = [2] # dim2 = [2] dtype = [torch.int8] values = list(product(dim1, dim2, dtype)) + names = ["dim1_{}_dim2_{}_dtype_{}".format(*vals) for vals in values] - names = ["dim1_{0}_dim2_{1}_dtype_{2}".format(*vals) for vals in values] ===========changed ref 2=========== <s>spmm_coo(cooA, Bt.t()) - # #out2 = F.spmm_coo(cooA, B) - # #out2 = F.spmm_coo_very_sparse(cooA, B) - # #out1 = torch.matmul(A, Bt.t()) - - # torch.cuda.synchronize() - # print(time.time() - t0) - - - def test_layout(): - a1 = torch.rand(16, 64, device="cuda", dtype=torch.float16) - a1 = torch.arange(16 * 64, device="cuda").reshape(16, 64).byte() - a2, s2 = F.transform(a1, "col_turing") - print(a2.shape) - - print(a1.flatten()[8 * 64 : 8 * 64 + 32]) - for i in range(4): - print(a2.flatten()[i * 8 * 32 : i * 8 * 32 + 32], 0) - ===========changed ref 3=========== # module: tests.test_functional n = 2 dim1 = torch.randint(256, 1 * 1024, size=(n,)).tolist() dim2 = torch.randint(256, 1 * 1024, size=(n,)).tolist() values = list(product(dim1, dim2)) + names = ["dim1_{}_dim2_{}".format(*vals) for vals in values] - names = ["dim1_{0}_dim2_{1}".format(*vals) for vals in values] ===========changed ref 4=========== # module: tests.test_functional n = 2 # dim1 = torch.randint(1,1*1024, size=(n,)).tolist() # dim2 = torch.randint(1,4*1024, size=(n,)).tolist() dim1 = [1 * 2048] dim2 = [12288] # dim1 = [32] # dim2 = [32] # dtype = [torch.float16, torch.int8] dtype = [torch.float16] out_function = ["zeros", "ones"] values = list(product(dim1, dim2, dtype, out_function)) names = [ + "dim1_{}_dim2_{}_dtype_{}_out_func_{}".format(*vals) for vals in values - "dim1_{0}_dim2_{1}_dtype_{2}_out_func_{3}".format(*vals) for vals in values ] ===========changed ref 5=========== # module: tests.test_functional n = 2 dim1 = torch.randint(1, 1 * 1024, size=(n,)).tolist() dim2 = torch.randint(1, 1 * 1024, size=(n,)).tolist() # dim1 = [7] # dim2 = [11] transposed_B = [False, True] values = list(product(dim1, dim2, transposed_B)) + names = ["dim1_{}_dim2_{}_transposed_B_{}".format(*vals) for 
vals in values] - names = ["dim1_{0}_dim2_{1}_transposed_B_{2}".format(*vals) for vals in values] ===========changed ref 6=========== # module: tests.test_functional n = 2 dim1 = torch.randint(1, 4 * 1024, size=(n,)).tolist() dim2 = torch.randint(1, 4 * 1024, size=(n,)).tolist() # dim1 = [4] # dim2 = [5] values = list(product(dim1, dim2)) + names = ["dim1_{}_dim2_{}".format(*vals) for vals in values] - names = ["dim1_{0}_dim2_{1}".format(*vals) for vals in values] ===========changed ref 7=========== # module: tests.test_functional n = 2 # dim1 = torch.randint(2,1024, size=(n,)).tolist() # dim2 = torch.randint(2,1024, size=(n,)).tolist() dim1 = [1] dim2 = [33] dtype = [torch.int8] # a_order = ['col_turing', 'col_ampere'] a_order = ["col_turing"] out_order = ["row"] values = list(product(dim1, dim2, dtype, a_order, out_order)) names = [ + "dim1_{}_dim2_{}_dtype_{}_orderA_{}_orderOut_{}".format(*vals) - "dim1_{0}_dim2_{1}_dtype_{2}_orderA_{3}_orderOut_{4}".format(*vals) for vals in values ] ===========changed ref 8=========== # module: tests.test_functional dim1 = [1024, 2048] inner = [12288 * 4, 4096 * 4] dim4 = [12288, 4096] values = list(zip(dim1, dim4, inner)) + names = ["dim1_{}_dim4_{}_inner_{}".format(*vals) for vals in values] - names = ["dim1_{0}_dim4_{1}_inner_{2}".format(*vals) for vals in values] ===========changed ref 9=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None +
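A minimal sketch (not part of the record above) of the round-trip error-measurement pattern that test_fp8_quant applies to the FP8 code books; the helper name and the identity codec in the usage line are illustrative only, and any quantize/dequantize pair such as F.quantize_blockwise / F.dequantize_blockwise could be passed in.

import torch

def roundtrip_error(x, quantize, dequantize):
    # quantize returns (quantized tensor, state); dequantize reverses it
    q, state = quantize(x)
    x_hat = dequantize(q, state)
    diff = (x - x_hat).abs()
    abserr = diff.mean().item()
    relerr = (diff / x.abs().add(1e-8)).mean().item()
    return abserr, relerr

# identity "codec" just to show the call shape; a real codec would lose precision
abserr, relerr = roundtrip_error(torch.randn(16), lambda t: (t.clone(), None), lambda q, s: q)
assert abserr == 0.0 and relerr == 0.0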
tests.test_optim/get_temp_dir
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> path = f"/tmp/autoswap/{str(uuid.uuid4())}" <del> path = "/tmp/autoswap/{0}".format(str(uuid.uuid4()))
# module: tests.test_optim def get_temp_dir(): <0> path = "/tmp/autoswap/{0}".format(str(uuid.uuid4())) <1> os.makedirs(path, exist_ok=True) <2> return path <3>
===========changed ref 0=========== # module: tests.test_functional - - ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 2=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 9=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 12=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 15=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / 
CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 18=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 19=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(PytorchLARS, self).__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) ===========changed ref 20=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 21=========== # module: tests.test_functional torch.set_printoptions( + precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000 - precision=4, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000 ) k = 20 ===========changed ref 22=========== # module: bitsandbytes.autograd._functions tensor = torch.Tensor """ This class pools outlier dimensions across layers. + This is particularly important for small models where outlier features - This is particularly important for small models where outlier features are less systematic and occur with low frequency. """ ===========changed ref 23=========== # module: bitsandbytes.optim.adam class Adam32bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam32bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, ) ===========changed ref 24=========== # module: bitsandbytes.optim.adam class Adam8bit(Optimizer2State): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, ): + super().__init__( - super(Adam8bit, self).__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, )
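A quick check (illustrative, not from the repository) that the str.format-to-f-string change in get_temp_dir is behavior-preserving:

import uuid

u = uuid.uuid4()
assert "/tmp/autoswap/{0}".format(str(u)) == f"/tmp/autoswap/{str(u)}" == f"/tmp/autoswap/{u}"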
tests.test_modules/MLP8bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__() <del> super(MLP8bit, self).__init__()
# module: tests.test_modules class MLP8bit(torch.nn.Module): def __init__(self, dim1, dim2, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0): <0> super(MLP8bit, self).__init__() <1> self.fc1 = bnb.nn.Linear8bitLt( <2> dim1, dim2, has_fp16_weights=has_fp16_weights, memory_efficient_backward=memory_efficient_backward, <3> threshold=threshold <4> ) <5> self.fc2 = bnb.nn.Linear8bitLt( <6> dim2, dim1, has_fp16_weights=has_fp16_weights, memory_efficient_backward=memory_efficient_backward, <7> threshold=threshold <8> ) <9>
===========unchanged ref 0=========== at: bitsandbytes.nn.modules Linear8bitLt(input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None) at: tests.test_modules MLP8bit(dim1, dim2, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0) at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None __init__(self) -> None __init__() -> None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl T_destination = TypeVar('T_destination', bound=Dict[str, Any]) ===========changed ref 0=========== # module: tests.test_functional - - ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 2=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 4=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 5=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 7=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 8=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 9=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 10=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 11=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 12=========== # module: 
bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 13=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 14=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 15=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 16=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 18=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) - ===========changed ref 19=========== # module: bitsandbytes.optim.lars class PytorchLARS(Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(PytorchLARS, self).__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) ===========changed ref 20=========== # module: bitsandbytes.cuda_setup.main + def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + ===========changed ref 21=========== # module: tests.test_functional torch.set_printoptions( + precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000 - precision=4, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000 ) k = 20
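The only change to MLP8bit.__init__ is the zero-argument super() call; a tiny standalone module (names are illustrative) showing the two spellings are equivalent on Python 3:

import torch

class Tiny(torch.nn.Module):
    def __init__(self):
        super().__init__()  # same effect as super(Tiny, self).__init__()
        self.fc = torch.nn.Linear(4, 4)

assert isinstance(Tiny().fc, torch.nn.Linear)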
tests.test_modules/Linear8bit.__init__
Modified
bitsandbytes-foundation~bitsandbytes
9d353ca786c5dcf73d5fc9bd9a8104c30a73b97b
Merge pull request #87 from lostmsu/main
<0>:<add> super().__init__() <del> super(Linear8bit, self).__init__()
# module: tests.test_modules class Linear8bit(nn.Module): def __init__(self, input_features, output_features, bias=True, args=None): <0> super(Linear8bit, self).__init__() <1> self.input_features = input_features <2> self.output_features = output_features <3> self.args = args <4> <5> self.weight = nn.Parameter(torch.empty(output_features, input_features)) <6> if bias: <7> self.bias = nn.Parameter(torch.empty(output_features)) <8> else: <9> self.register_parameter("bias", None) <10> <11> torch.nn.init.xavier_uniform_(self.weight) <12> if self.bias is not None: <13> torch.nn.init.zeros_(self.bias) <14>
===========unchanged ref 0=========== at: tests.test_modules Linear8bit(input_features, output_features, bias=True, args=None) at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.init zeros_(tensor: Tensor) -> Tensor xavier_uniform_(tensor: Tensor, gain: float=1.) -> Tensor at: torch.nn.modules.module.Module __init__(self) -> None __init__() -> None ===========unchanged ref 1=========== register_parameter(name: str, param: Optional[Parameter]) -> None at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) ===========changed ref 0=========== # module: tests.test_modules class MLP8bit(torch.nn.Module): def __init__(self, dim1, dim2, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0): + super().__init__() - super(MLP8bit, self).__init__() self.fc1 = bnb.nn.Linear8bitLt( dim1, dim2, has_fp16_weights=has_fp16_weights, memory_efficient_backward=memory_efficient_backward, threshold=threshold ) self.fc2 = bnb.nn.Linear8bitLt( dim2, dim1, has_fp16_weights=has_fp16_weights, memory_efficient_backward=memory_efficient_backward, threshold=threshold ) ===========changed ref 1=========== # module: tests.test_functional - - ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + _instance = None + ===========changed ref 3=========== # module: bitsandbytes.cextension - class CUDASetup(object): - _instance = None - ===========changed ref 4=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def __init__(self): + raise RuntimeError("Call get_instance() instead") + ===========changed ref 5=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def __init__(self): - raise RuntimeError("Call get_instance() instead") - ===========changed ref 6=========== # module: bitsandbytes.cuda_setup.main + CUDA_RUNTIME_LIB: str = "libcudart.so" ===========changed ref 7=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def add_log_entry(self, msg, is_warning=False): + self.cuda_setup_log.append((msg, is_warning)) + ===========changed ref 8=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def add_log_entry(self, msg, is_warning=False): - self.cuda_setup_log.append((msg, is_warning)) - ===========changed ref 9=========== # module: 
bitsandbytes.cuda_setup.main + class CUDASetup: + def initialize(self): + self.has_printed = False + self.lib = None + self.initialized = False + ===========changed ref 10=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def __setstate__(self, state): + super().__setstate__(state) - super(Optimizer8bit, self).__setstate__(state) ===========changed ref 11=========== # module: bitsandbytes.cuda_setup.main + def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + ===========changed ref 12=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") - raise NotImplementedError(f"init_state method needs to be overidden") ===========changed ref 13=========== # module: bitsandbytes.cuda_setup.main + def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + ===========changed ref 14=========== # module: bitsandbytes.optim.optimizer class Optimizer8bit(torch.optim.Optimizer): def update_step(self, group, p, gindex, pindex): raise NotImplementedError( + "The update_step method needs to be overridden" - f"The update_step method needs to be overidden" ) ===========changed ref 15=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + ===========changed ref 16=========== # module: bitsandbytes.cextension - class CUDASetup(object): - @classmethod - def get_instance(cls): - if cls._instance is None: - cls._instance = cls.__new__(cls) - cls._instance.initialize() - return cls._instance - ===========changed ref 17=========== # module: bitsandbytes.cuda_setup.main + class CUDASetup: + def print_log_stack(self): + for msg, is_warning in self.cuda_setup_log: + if is_warning: + warn(msg) + else: + print(msg) + ===========changed ref 18=========== # module: bitsandbytes.cuda_setup.main + def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + return { + path / CUDA_RUNTIME_LIB + for path in candidate_paths + if (path / CUDA_RUNTIME_LIB).is_file() + } + ===========changed ref 19=========== # module: bitsandbytes.cextension - class CUDASetup(object): - def print_log_stack(self): - for msg, is_warning in self.cuda_setup_log: - if is_warning: - warn(msg) - else: - print(msg) -
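A self-contained sketch (the module name is invented, not part of the tests) of the parameter-registration pattern Linear8bit uses: nn.Parameter for the weight, register_parameter("bias", None) so .bias is always defined, then Xavier/zeros init:

import torch
from torch import nn

class TinyAffine(nn.Module):
    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.empty(out_features))
        else:
            self.register_parameter("bias", None)  # keeps self.bias defined (as None)
        nn.init.xavier_uniform_(self.weight)
        if self.bias is not None:
            nn.init.zeros_(self.bias)

assert TinyAffine(3, 2, bias=False).bias is None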
bitsandbytes.cuda_setup.main/CUDASetup.generate_instructions
Modified
bitsandbytes-foundation~bitsandbytes
81990491ff99425718810158d71233283096c734
Merge pull request #113 from Borzik/fix-warnings
# module: bitsandbytes.cuda_setup.main class CUDASetup: def generate_instructions(self): <0> if self.cuda is None: <1> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.') <2> self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.') <3> self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:') <4> self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null') <5> self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a') <6> self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc') <7> return <8> <9> if self.cudart_path is None: <10> self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.') <11> self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable') <12> self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null') <13> self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a') <14> self.add_log_entry('CUDA</s>
===========below chunk 0=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def generate_instructions(self): # offset: 1 self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.') self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh') self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.') self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local') return make_cmd = f'CUDA_VERSION={self.cuda_version_string}' if len(self.cuda_version_string) < 3: make_cmd += ' make cuda92' elif self.cuda_version_string == '110': make_cmd += ' make cuda110' elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0: make_cmd += ' make cuda11x' has_cublaslt = is_cublasLt_compatible(self.cc) if not has_cublaslt: make_cmd += '_nomatmul' self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:') self.add_log_entry('git clone [email protected]:TimDettmers/bitsandbytes.git') self.add_log_entry('cd bitsandbytes') self.add_log_entry(make_cmd) self.add_log_entry('python setup.py install')
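The make-target selection that generate_instructions logs, restated as a pure function for illustration (the function name is not part of bitsandbytes, and the early return for CUDA 10.0 added in this commit is omitted):

def make_target(cuda_version_string, has_cublaslt):
    cmd = f"CUDA_VERSION={cuda_version_string}"
    if len(cuda_version_string) < 3:
        cmd += " make cuda92"
    elif cuda_version_string == "110":
        cmd += " make cuda110"
    elif cuda_version_string[:2] == "11" and int(cuda_version_string[2]) > 0:
        cmd += " make cuda11x"
    if not has_cublaslt:
        cmd += "_nomatmul"  # pre-cuBLASLt GPUs get the no-matmul build
    return cmd

assert make_target("113", True) == "CUDA_VERSION=113 make cuda11x"
assert make_target("113", False).endswith("cuda11x_nomatmul")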
bitsandbytes.cuda_setup.main/get_cuda_version
Modified
bitsandbytes-foundation~bitsandbytes
81990491ff99425718810158d71233283096c734
Merge pull request #113 from Borzik/fix-warnings
<9>:<add> try: <add> check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version))) <del> check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version))) <10>:<add> except AttributeError as e: <add> CUDASetup.get_instance().add_log_entry(f'ERROR: {str(e)}') <add> CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: libcudart.so path is {cudart_path}') <add> CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: Is seems that your cuda installation is not in your path. See https://github.com/TimDettmers/bitsandbytes/issues/85 for more information.')
# module: bitsandbytes.cuda_setup.main # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION def get_cuda_version(cuda, cudart_path): <0> if cuda is None: return None <1> <2> try: <3> cudart = ct.CDLL(cudart_path) <4> except OSError: <5> CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') <6> return None <7> <8> version = ct.c_int() <9> check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version))) <10> version = int(version.value) <11> major = version//1000 <12> minor = (version-(major*1000))//10 <13> <14> if major < 11: <15> CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') <16> <17> return f'{major}{minor}' <18>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def generate_instructions(self): if self.cuda is None: self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.') self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.') self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:') self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null') self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a') self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc') return if self.cudart_path is None: self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.') self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable') self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null') self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a') self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from</s> ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def generate_instructions(self): # offset: 1 <s>1a') self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc') self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.') self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh') self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.') self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local') return make_cmd = f'CUDA_VERSION={self.cuda_version_string}' if len(self.cuda_version_string) < 3: make_cmd += ' make cuda92' elif self.cuda_version_string == '110': make_cmd += ' make cuda110' elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0: make_cmd += ' make cuda11x' + elif self.cuda_version_string == '100': + self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.') + self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10</s> ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def generate_instructions(self): # offset: 2 <s> versions are uninstalled and removed from $LD_LIBRARY_PATH variables.') + return + has_cublaslt = is_cublasLt_compatible(self.cc) if not has_cublaslt: make_cmd += '_nomatmul' self.add_log_entry('CUDA SETUP: Something unexpected happened. 
Please compile from source:') self.add_log_entry('git clone [email protected]:TimDettmers/bitsandbytes.git') self.add_log_entry('cd bitsandbytes') self.add_log_entry(make_cmd) self.add_log_entry('python setup.py install')
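How get_cuda_version turns the integer returned by cudaRuntimeGetVersion into its version string, shown as a standalone helper (the name and example values are illustrative; e.g. 11070 encodes CUDA 11.7):

def decode_cuda_runtime_version(version):
    major = version // 1000
    minor = (version - (major * 1000)) // 10
    return f"{major}{minor}"

assert decode_cuda_runtime_version(11070) == "117"
assert decode_cuda_runtime_version(10020) == "102"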
bitsandbytes.cuda_setup.main/check_cuda_result
Modified
bitsandbytes-foundation~bitsandbytes
b3de19218e118b18dd1139bcd83e965733d701f4
Added error message for unexpected CUDA exception.
<4>:<add> if error_str.value is not None: <add> CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") <del> CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") <5>:<add> else: <add> CUDASetup.get_instance().add_log_entry(f"Unknown CUDA exception! Please check your CUDA install. It might also be that your GPU is too old.")
# module: bitsandbytes.cuda_setup.main def check_cuda_result(cuda, result_val): <0> # 3. Check for CUDA errors <1> if result_val != 0: <2> error_str = ct.c_char_p() <3> cuda.cuGetErrorString(result_val, ct.byref(error_str)) <4> CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}") <5>
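Why the commit adds the None check: a ctypes c_char_p that was never populated holds None, so .decode() would raise AttributeError. A CUDA-free sketch of the guarded pattern (no real driver call is made here):

import ctypes as ct

error_str = ct.c_char_p()  # stands in for the out-parameter of cuGetErrorString
if error_str.value is not None:
    print(f"CUDA exception! Error code: {error_str.value.decode()}")
else:
    print("Unknown CUDA exception! No error string was returned.")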
bitsandbytes.autograd._functions/MatMul8bitLt.forward
Modified
bitsandbytes-foundation~bitsandbytes
de535889348c5406eb34d9f7e0c362cadb113be5
Added Int8 matmul support for all GPUs. Full backward support.
<0>:<add> using_igemmlt = torch.cuda.get_device_capability(device=A.device) >= (7, 5) and not state.force_no_igemmlt <add> # default of pytorch behavior if inputs are empty <del> # default to pytorch behavior if inputs are empty <8>:<add> return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device) <del> return torch.empty(A.shape[:-1]+B.shape[1:], dtype=A.dtype, device=A.device) <10>:<add> return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device) <del> return torch.empty(A.shape[:-1]+B.shape[:1], dtype=A.dtype, device=A.device) <29>:<del> CA, CAt, SCA, SCAt, coo_tensorA = F
<s> MatMul8bitLt(torch.autograd.Function): + # forward is the same, but we added the fallback for pre-turing GPUs + # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") + @staticmethod + def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): - def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()): <0> # default to pytorch behavior if inputs are empty <1> ctx.is_empty = False <2> if prod(A.shape) == 0: <3> ctx.is_empty = True <4> ctx.A = A <5> ctx.B = B <6> ctx.bias = bias <7> if A.shape[-1] == B.shape[0]: <8> return torch.empty(A.shape[:-1]+B.shape[1:], dtype=A.dtype, device=A.device) <9> else: <10> return torch.empty(A.shape[:-1]+B.shape[:1], dtype=A.dtype, device=A.device) <11> <12> # 1. Quantize A <13> # 2. Quantize B <14> # 3. Matmul <15> # 4. Mixed-precision decomposition matmul <16> # 5. Save state <17> formatB = state.formatB <18> input_shape = A.shape <19> if state.outlier_pool is None: <20> state.outlier_pool = GlobalOutlierPooler.get_instance() <21> <22> # Cast A to fp16 <23> if A.dtype != torch.float16: <24> warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization") <25> <26> # 1. Quantize A <27> if len(A.shape) == 3: <28> A = A.view(-1, A.shape[-1]).contiguous() <29> CA, CAt, SCA, SCAt, coo_tensorA = F</s>
===========below chunk 0=========== <s>(torch.autograd.Function): + # forward is the same, but we added the fallback for pre-turing GPUs + # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") + @staticmethod + def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): - def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()): # offset: 1 A.to(torch.float16), threshold=state.threshold ) if state.threshold > 0.0 and coo_tensorA is not None: if state.has_fp16_weights: idx = torch.unique(coo_tensorA.colidx).long() CA[:, idx] = 0 CAt[:, idx] = 0 subA = A[:, idx] state.subB = B[:, idx].t().contiguous() state.idx = idx else: if state.CxB is None: # B in in 8-bit row-major, we can transform it back to 16-bit to extract outlier dimensions # we also need to convert it to the turing/ampere format state.CxB, state.SB = F.transform(state.CB, to_order=formatB) else: if not state.has_fp16_weights and state.CxB is None: state.CxB, state.SB = F.transform(state.CB, to_order=formatB) subA = None # 2. Quantize B if state.has_fp16_weights: has_grad = True if (getattr(B, "grad", None) is not None) else False is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1) if is_transposed: B = B.contiguous() if (state.is_training and not has_grad) or state.CxB is None: state.</s> ===========below chunk 1=========== <s>(torch.autograd.Function): + # forward is the same, but we added the fallback for pre-turing GPUs + # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") + @staticmethod + def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): - def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()): # offset: 2 <s>contiguous() if (state.is_training and not has_grad) or state.CxB is None: state.reset_grads() ( CB, state.CBt, state.SCB, state.SCBt, coo_tensorB, ) = F.double_quant(B.to(torch.float16)) state.CxB, state.SB = F.transform(CB, to_order=formatB) else: has_grad = False if coo_tensorA is not None and not state.has_fp16_weights: # extract outliers outlier_idx = torch.unique(coo_tensorA.colidx) state.idx = outlier_idx # state.outlier_pool.add_outliers(outlier_idx, A.shape[-1]) # if state.use_pool and state.outlier_pool.model_dim == A.shape[-1]: # # do not use pool for 2nd FFN layer # state.idx = state.outlier_pool.get_current_outlier_idx().to(A.device) # else: # state.idx = outlier_idx outliers = F.extract_outliers(state.CxB, state.SB, state.idx.int()) state.subB = ( (outliers * state.SCB.view(-1,</s> ===========below chunk 2=========== <s>(torch.autograd.Function): + # forward is the same, but we added the fallback for pre-turing GPUs + # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") + @staticmethod + def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): - def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()): # offset: 3 <s> / 127.0) .t() .contiguous() .to(A.dtype) ) CA[:, state.idx.long()] = 0 CAt[:, state.idx.long()] = 0 subA = A[:, state.idx.long()] shapeB = state.SB[0] if len(input_shape) == 3: output_shape = (input_shape[0], input_shape[1], shapeB[0]) else: output_shape = (input_shape[0], shapeB[0]) # 3. 
Matmul C32A, SA = F.transform(CA, "col32") out32, Sout32 = F.igemmlt(C32A, state.CxB, SA, state.SB) # we apply the fused bias here if bias is None or bias.dtype == torch.float16: output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=bias) output = output.to(A.dtype) else: # apply bias separately output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=None) output = output.to(A.dtype).add_(bias) # 4. Mixed-precision decomposition matmul if coo_tensorA is not None and subA is</s> ===========below chunk 3=========== <s>(torch.autograd.Function): + # forward is the same, but we added the fallback for pre-turing GPUs + # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") + @staticmethod + def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): - def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()): # offset: 4 <s>: output += torch.matmul(subA, state.subB) # 5. Save state ctx.state = state ctx.formatB = formatB ctx.grad_shape = input_shape ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype if any(ctx.needs_input_grad[:2]): ctx.tensors = (CAt, subA) ctx.tensor_states = (SCAt, state.idx) else: ctx.tensors = [None, None] ctx.tensor_states = (None, None) ctx.save_for_backward(None, None) clone_func = torch.clone if len(output_shape) == 3 else lambda x : x return clone_func(output.view(output_shape))
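The new using_igemmlt gate compares the (major, minor) compute-capability tuple against (7, 5); Python compares tuples lexicographically, so Turing/Ampere pass and older GPUs fall back to the non-igemmlt path. A CUDA-free sketch (the function name is illustrative):

def supports_igemmlt(capability, force_no_igemmlt=False):
    # capability is the (major, minor) pair from torch.cuda.get_device_capability
    return capability >= (7, 5) and not force_no_igemmlt

assert supports_igemmlt((8, 0)) and supports_igemmlt((7, 5))
assert not supports_igemmlt((7, 0)) and not supports_igemmlt((8, 0), force_no_igemmlt=True)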
bitsandbytes.autograd._functions/MatMul8bitLt.backward
Modified
bitsandbytes-foundation~bitsandbytes
de535889348c5406eb34d9f7e0c362cadb113be5
Added Int8 matmul support for all GPUs. Full backward support.
<1>:<add> bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias) <del> bias_grad = (None if ctx.bias is None else torch.zeros_like(ctx.bias)) <16>:<del> grad_output = grad_output.reshape( <17>:<del> -1, grad_output.shape[-1] <18>:<del> ).contiguous() <19>:<add> grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()
# module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): @staticmethod def backward(ctx, grad_output): <0> if ctx.is_empty: <1> bias_grad = (None if ctx.bias is None else torch.zeros_like(ctx.bias)) <2> return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None <3> req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad <4> CAt, subA = ctx.tensors <5> SCAt, idx = ctx.tensor_states <6> formatB = ctx.formatB <7> state = ctx.state <8> grad_A = grad_B = grad_bias = None <9> <10> if req_gradBias: <11> # compute grad_bias first before changing grad_output dtype <12> grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias) <13> <14> # Cast grad_output to fp16 <15> if len(grad_output.shape) == 3: <16> grad_output = grad_output.reshape( <17> -1, grad_output.shape[-1] <18> ).contiguous() <19> <20> Cgrad, Cgradt, SCgrad, SCgradt, coo_tensor = F.double_quant(grad_output.to(torch.float16)) <21> if req_gradB: <22> CxAt, SAt = F.transform(CAt, formatB, transpose=True) <23> C32grad, Sgrad = F.transform(Cgradt, "col32", transpose=True) <24> gradB32, SgradB32 = F.igemmlt(C32grad, CxAt, Sgrad, SAt) <25> grad_B = F.mm_dequant(gradB32, SgradB32, SCgradt, SCAt) <26> if state.threshold > 0.0 and subA is not None: <27> grad_B[:, idx] += torch.matmul(</s>
===========below chunk 0=========== # module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): @staticmethod def backward(ctx, grad_output): # offset: 1 if req_gradA: if state.CBt is not None: C32grad, Sgrad = F.transform(Cgrad, "col32") if state.CxBt is None: state.CxBt, state.SBt = F.transform( state.CBt, to_order=formatB, transpose=True ) gradA32, SgradA32 = F.igemmlt(C32grad, state.CxBt, Sgrad, state.SBt) grad_A = F.mm_dequant(gradA32, SgradA32, SCgrad, state.SCBt).view(ctx.grad_shape).to(ctx.dtype_A) elif state.CB is not None: CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1. / 127.0)) grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A) else: raise Exception('State must contain either CBt or CB matrix for backward') return grad_A, grad_B, None, grad_bias, None ===========changed ref 0=========== # module: bitsandbytes.autograd._functions @dataclass class MatmulLtState: + def get_tile_size(self): + assert self.formatB in ( + "col_turing", + "col_ampere", + ), f"please find this assert and manually enter tile size for {self.formatB}" + return (8, 32) if self.formatB == "col_turing" else (32, 32) + ===========changed ref 1=========== # module: bitsandbytes.autograd._functions @dataclass class MatmulLtState: + tile_indices: Optional[torch.Tensor] = None + force_no_igemmlt: bool = False CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str() ===========changed ref 2=========== # module: bitsandbytes.autograd._functions + def undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor: + """ + Undo a tiled permutation such as turing or ampere layout + + :param permuted_tensor: torch tensor in a permuted layout + :param tile_indices: reverse transformation indices, from get_inverse_transform_indices + :return: contiguous row-major tensor + """ + (rows, cols), (tile_rows, tile_cols) = permuted_tensor.shape, tile_indices.shape + assert rows % tile_rows == cols % tile_cols == 0, "tensor must contain a whole number of tiles" + tensor = permuted_tensor.reshape(-1, tile_indices.numel()).t() + outputs = torch.empty_like(tensor) # note: not using .index_copy because it was slower on cuda + outputs[tile_indices.flatten()] = tensor + outputs = outputs.reshape(tile_rows, tile_cols, cols // tile_cols, rows // tile_rows) + outputs = outputs.permute(3, 0, 2, 1) # (rows // tile_rows, tile_rows), (cols // tile_cols, tile_cols) + return outputs.reshape(rows, cols).contiguous() + ===========changed ref 3=========== # module: bitsandbytes.autograd._functions + def get_inverse_transform_indices(transform_tile: callable, tile_size: Tuple[int, int]): + """ + Compute a permutation of indices that invert the specified (tiled) matrix transformation + + :param transform_tile: a function that applies forward transform to a tensor of shape [dim1, dim2] + :param tile_size: higher-level tile dimensions, i.e. 
(8, 32) for Turing and (32, 32) for Ampere + :note: we assume that tile_transform applies to a cpu-based int8 tensor of shape tile_size + :example: transform_tile function for the turing layout (bitsandbytes.functional as F) + :returns: indices + """ + d1, d2 = tile_size + assert 0 < d1 * d2 < 2**64 + tile_indices = torch.arange(d1 * d2, dtype=torch.int64).view(d1, d2) + # encode each position in tile as a tuple of <= 8 unique bytes + permuted_tile_indices = torch.zeros_like(tile_indices) + for i in range(8): + # select i-th byte, apply transformation and trace where each index ended up + ith_dim_indices = torch.div(tile_indices, 256**i, rounding_mode="trunc") % 256 + sample_tile_i = (ith_dim_indices - 128).to(torch.int8).contiguous() + assert torch.all(sample_tile_i.int() + 128 == ith_dim_indices), "int overflow" + permuted_tile_i = transform_tile(sample_tile_i) + ith_permuted_indices = permuted_tile_i.to(tile_indices.dtype) + 128 + permuted_tile_indices += ith_permuted_indices * (256**i) + if d1 * d2 < 256**i: + break # if all indices fit in i bytes, stop early + return perm</s> ===========changed ref 4=========== # module: bitsandbytes.autograd._functions + def get_inverse_transform_indices(transform_tile: callable, tile_size: Tuple[int, int]): # offset: 1 <s>1 * d2 < 256**i: + break # if all indices fit in i bytes, stop early + return permuted_tile_indices +
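The core idea behind undo_layout in the context above: once the permutation applied by the tiled layout is known, scattering with those indices restores row-major order. A small 1-D sketch of that scatter (the random permutation stands in for the turing/ampere tile transform):

import torch

x = torch.arange(12)
perm = torch.randperm(12)        # plays the role of tile_indices
permuted = x[perm]
restored = torch.empty_like(x)
restored[perm] = permuted        # mirrors outputs[tile_indices.flatten()] = tensor
assert torch.equal(restored, x)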
bitsandbytes.nn.modules/Linear8bitLt.__init__
Modified
bitsandbytes-foundation~bitsandbytes
de535889348c5406eb34d9f7e0c362cadb113be5
Added Int8 matmul support for all GPUs. Full backward support.
<0>:<del> super().__init__( <1>:<add> super().__init__(input_features, output_features, bias) <del> input_features, output_features, bias <2>:<add> assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0" <del> ) <12>:<del> self.weight = Int8Params( <13>:<add> self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) <del> self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights <14>:<del> )
<s> __init__( - self, - input_features, - output_features, - bias=True, - has_fp16_weights=True, - memory_efficient_backward=False, - threshold=0.0, - index=None, - ): + def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True, + memory_efficient_backward=False, threshold=0.0, index=None): <0> super().__init__( <1> input_features, output_features, bias <2> ) <3> self.state = bnb.MatmulLtState() <4> self.index = index <5> <6> self.state.threshold = threshold <7> self.state.has_fp16_weights = has_fp16_weights <8> self.state.memory_efficient_backward = memory_efficient_backward <9> if threshold > 0.0 and not has_fp16_weights: <10> self.state.use_pool = True <11> <12> self.weight = Int8Params( <13> self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights <14> ) <15>
===========unchanged ref 0=========== at: bitsandbytes.autograd._functions MatmulLtState() at: bitsandbytes.autograd._functions.MatmulLtState CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str() at: bitsandbytes.nn.modules Int8Params(data: Tensor=..., requires_grad: builtins.bool=...) at: bitsandbytes.nn.modules.Int8Params.cuda self.data = CB at: torch.nn.modules.linear.Linear __constants__ = ['in_features', 'out_features'] in_features: int out_features: int weight: Tensor __init__(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None __init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None ===========changed ref 0=========== # module: bitsandbytes.autograd._functions @dataclass class MatmulLtState: + tile_indices: Optional[torch.Tensor] = None + force_no_igemmlt: bool = False CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str() ===========changed ref 1=========== # module: bitsandbytes.autograd._functions @dataclass class MatmulLtState: + def get_tile_size(self): + assert self.formatB in ( + "col_turing", + "col_ampere", + ), f"please find this assert and manually enter tile size for {self.formatB}" + return (8, 32) if self.formatB == "col_turing" else (32, 32) + ===========changed ref 2=========== # module: bitsandbytes.autograd._functions + def undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor: + """ + Undo a tiled permutation such as turing or ampere layout + + :param permuted_tensor: torch tensor in a permuted layout + :param tile_indices: reverse transformation indices, from get_inverse_transform_indices + :return: contiguous row-major tensor + """ + (rows, cols), (tile_rows, tile_cols) = permuted_tensor.shape, tile_indices.shape + assert rows % tile_rows == cols % tile_cols == 0, "tensor must contain a whole number of tiles" + tensor = permuted_tensor.reshape(-1, tile_indices.numel()).t() + outputs = torch.empty_like(tensor) # note: not using .index_copy because it was slower on cuda + outputs[tile_indices.flatten()] = tensor + outputs = outputs.reshape(tile_rows, tile_cols, cols // tile_cols, rows // tile_rows) + outputs = outputs.permute(3, 0, 2, 1) # (rows // tile_rows, tile_rows), (cols // tile_cols, tile_cols) + return outputs.reshape(rows, cols).contiguous() + ===========changed ref 3=========== # module: bitsandbytes.autograd._functions + def get_inverse_transform_indices(transform_tile: callable, tile_size: Tuple[int, int]): + """ + Compute a permutation of indices that invert the specified (tiled) matrix transformation + + :param transform_tile: a function that applies forward transform to a tensor of shape [dim1, dim2] + :param tile_size: higher-level tile dimensions, i.e. 
(8, 32) for Turing and (32, 32) for Ampere + :note: we assume that tile_transform applies to a cpu-based int8 tensor of shape tile_size + :example: transform_tile function for the turing layout (bitsandbytes.functional as F) + :returns: indices + """ + d1, d2 = tile_size + assert 0 < d1 * d2 < 2**64 + tile_indices = torch.arange(d1 * d2, dtype=torch.int64).view(d1, d2) + # encode each position in tile as a tuple of <= 8 unique bytes + permuted_tile_indices = torch.zeros_like(tile_indices) + for i in range(8): + # select i-th byte, apply transformation and trace where each index ended up + ith_dim_indices = torch.div(tile_indices, 256**i, rounding_mode="trunc") % 256 + sample_tile_i = (ith_dim_indices - 128).to(torch.int8).contiguous() + assert torch.all(sample_tile_i.int() + 128 == ith_dim_indices), "int overflow" + permuted_tile_i = transform_tile(sample_tile_i) + ith_permuted_indices = permuted_tile_i.to(tile_indices.dtype) + 128 + permuted_tile_indices += ith_permuted_indices * (256**i) + if d1 * d2 < 256**i: + break # if all indices fit in i bytes, stop early + return perm</s> ===========changed ref 4=========== # module: bitsandbytes.autograd._functions + def get_inverse_transform_indices(transform_tile: callable, tile_size: Tuple[int, int]): # offset: 1 <s>1 * d2 < 256**i: + break # if all indices fit in i bytes, stop early + return permuted_tile_indices +
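MatmulLtState.get_tile_size from the context above, restated as a free function for reference (a sketch, not the library API): col_turing tiles are 8x32 and col_ampere tiles are 32x32.

def get_tile_size(formatB):
    assert formatB in ("col_turing", "col_ampere"), f"unknown format {formatB}"
    return (8, 32) if formatB == "col_turing" else (32, 32)

assert get_tile_size("col_turing") == (8, 32)
assert get_tile_size("col_ampere") == (32, 32)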
bitsandbytes.nn.modules/Linear8bitLt.forward
Modified
bitsandbytes-foundation~bitsandbytes
de535889348c5406eb34d9f7e0c362cadb113be5
Added Int8 matmul support for all GPUs. Full backward support.
<1>:<del> <6>:<add> if self.bias is not None and self.bias.dtype != x.dtype: <del> if self.bias is not None and self.bias.dtype != torch.float16: <7>:<add> self.bias.data = self.bias.data.to(x.dtype) <del> self.bias.data = self.bias.data.half() <10>:<del> <12>:<add> if self.state.CB is not None and self.state.CxB is not None: <del> if not self.state.memory_efficient_backward and self.state.CB is not None: <17>:<del> elif self.state.memory_efficient_backward and self.state.CxB is not None: <18>:<del> # For memory efficient backward, we convert 8-bit row major to turing/ampere format at each inference pass. <19>:<del> # Thus, we delete CxB from the state. <20>:<del> del self.state.CxB <21>:<del>
# module: bitsandbytes.nn.modules class Linear8bitLt(nn.Linear): + def forward(self, x: torch.Tensor): - def forward(self, x): <0> self.state.is_training = self.training <1> <2> if self.weight.CB is not None: <3> self.init_8bit_state() <4> <5> # weights are cast automatically as Int8Params, but the bias has to be cast manually <6> if self.bias is not None and self.bias.dtype != torch.float16: <7> self.bias.data = self.bias.data.half() <8> <9> out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state) <10> <11> if not self.state.has_fp16_weights: <12> if not self.state.memory_efficient_backward and self.state.CB is not None: <13> # we converted 8-bit row major to turing/ampere format in the first inference pass <14> # we no longer need the row-major weight <15> del self.state.CB <16> self.weight.data = self.state.CxB <17> elif self.state.memory_efficient_backward and self.state.CxB is not None: <18> # For memory efficient backward, we convert 8-bit row major to turing/ampere format at each inference pass. <19> # Thus, we delete CxB from the state. <20> del self.state.CxB <21> <22> return out <23>
===========unchanged ref 0=========== at: bitsandbytes.autograd._functions matmul(A: tensor, B: tensor, out: tensor=None, state: MatmulLtState=None, threshold=0.0, bias=None) at: bitsandbytes.autograd._functions.MatmulLtState is_training = True has_fp16_weights = True memory_efficient_backward = False at: bitsandbytes.autograd._functions.MatmulLtState.reset_grads self.CB = None self.CxB = None at: bitsandbytes.nn.modules.Int8Params.__new__ cls.CB = None at: bitsandbytes.nn.modules.Int8Params.cuda self.data = CB at: bitsandbytes.nn.modules.Linear8bitLt init_8bit_state() at: bitsandbytes.nn.modules.Linear8bitLt.__init__ self.state = bnb.MatmulLtState() self.weight = Int8Params( self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights ) at: torch._C float16: dtype = ... at: torch._tensor.Tensor.__setstate__ self.data = state[0] at: torch.nn.modules.linear.Linear forward(self, input: Tensor) -> Tensor at: torch.nn.modules.linear.Linear.__init__ self.bias = Parameter(torch.empty(out_features, **factory_kwargs)) at: torch.nn.modules.module.Module.train self.training = mode ===========changed ref 0=========== <s> __init__( - self, - input_features, - output_features, - bias=True, - has_fp16_weights=True, - memory_efficient_backward=False, - threshold=0.0, - index=None, - ): + def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True, + memory_efficient_backward=False, threshold=0.0, index=None): - super().__init__( + super().__init__(input_features, output_features, bias) - input_features, output_features, bias + assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0" - ) self.state = bnb.MatmulLtState() self.index = index self.state.threshold = threshold self.state.has_fp16_weights = has_fp16_weights self.state.memory_efficient_backward = memory_efficient_backward if threshold > 0.0 and not has_fp16_weights: self.state.use_pool = True - self.weight = Int8Params( + self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) - self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights - ) ===========changed ref 1=========== # module: bitsandbytes.autograd._functions @dataclass class MatmulLtState: + def get_tile_size(self): + assert self.formatB in ( + "col_turing", + "col_ampere", + ), f"please find this assert and manually enter tile size for {self.formatB}" + return (8, 32) if self.formatB == "col_turing" else (32, 32) + ===========changed ref 2=========== # module: bitsandbytes.autograd._functions @dataclass class MatmulLtState: + tile_indices: Optional[torch.Tensor] = None + force_no_igemmlt: bool = False CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str() ===========changed ref 3=========== # module: bitsandbytes.autograd._functions + def undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor: + """ + Undo a tiled permutation such as turing or ampere layout + + :param permuted_tensor: torch tensor in a permuted layout + :param tile_indices: reverse transformation indices, from get_inverse_transform_indices + :return: contiguous row-major tensor + """ + (rows, cols), (tile_rows, 
tile_cols) = permuted_tensor.shape, tile_indices.shape + assert rows % tile_rows == cols % tile_cols == 0, "tensor must contain a whole number of tiles" + tensor = permuted_tensor.reshape(-1, tile_indices.numel()).t() + outputs = torch.empty_like(tensor) # note: not using .index_copy because it was slower on cuda + outputs[tile_indices.flatten()] = tensor + outputs = outputs.reshape(tile_rows, tile_cols, cols // tile_cols, rows // tile_rows) + outputs = outputs.permute(3, 0, 2, 1) # (rows // tile_rows, tile_rows), (cols // tile_cols, tile_cols) + return outputs.reshape(rows, cols).contiguous() + ===========changed ref 4=========== # module: bitsandbytes.autograd._functions + def get_inverse_transform_indices(transform_tile: callable, tile_size: Tuple[int, int]): + """ + Compute a permutation of indices that invert the specified (tiled) matrix transformation + + :param transform_tile: a function that applies forward transform to a tensor of shape [dim1, dim2] + :param tile_size: higher-level tile dimensions, i.e. (8, 32) for Turing and (32, 32) for Ampere + :note: we assume that tile_transform applies to a cpu-based int8 tensor of shape tile_size + :example: transform_tile function for the turing layout (bitsandbytes.functional as F) + :returns: indices + """ + d1, d2 = tile_size + assert 0 < d1 * d2 < 2**64 + tile_indices = torch.arange(d1 * d2, dtype=torch.int64).view(d1, d2) + # encode each position in tile as a tuple of <= 8 unique bytes + permuted_tile_indices = torch.zeros_like(tile_indices) + for i in range(8): + # select i-th byte, apply transformation and trace where each index ended up + ith_dim_indices = torch.div(tile_indices, 256**i, rounding_mode="trunc") % 256 + sample_tile_i = (ith_dim_indices - 128).to(torch.int8).contiguous() + assert torch.all(sample_tile_i.int() + 128 == ith_dim_indices), "int overflow" + permuted_tile_i = transform_tile(sample_tile_i) + ith_permuted_indices = permuted_tile_i.to(tile_indices.dtype) + 128 + permuted_tile_indices += ith_permuted_indices * (256**i) + if d1 * d2 < 256**i: + break # if all indices fit in i bytes, stop early + return perm</s>
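Aside on the record above: get_inverse_transform_indices/undo_layout rest on a simple fact — a layout transform that only permutes elements can be inverted by applying it to an index tensor and scattering back. A minimal, self-contained sketch of that idea, using a plain transpose as a stand-in for the Turing/Ampere tile transform (illustrative only, not the library code):

import torch

def toy_transform(tile: torch.Tensor) -> torch.Tensor:
    # stand-in for the GPU tile layout: any fixed permutation works
    return tile.t().contiguous()

d1, d2 = 4, 6
original = torch.randn(d1, d2)

# apply the transform to an index tile: position j of the transformed layout
# holds the element whose original flat index is inverse_indices[j]
inverse_indices = toy_transform(torch.arange(d1 * d2).view(d1, d2)).flatten()

transformed = toy_transform(original).flatten()
undone = torch.empty_like(transformed)
undone[inverse_indices] = transformed          # scatter back to original order
assert torch.equal(undone.view(d1, d2), original)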
bitsandbytes.cuda_setup.main/CUDASetup.initialize
Modified
bitsandbytes-foundation~bitsandbytes
0f5c3948709ae70cf733cefbd831aaea8a4e38c9
Added version 0.37.0.
<0>:<add> if not getattr(self, 'initialized', False): <add> self.has_printed = False <del> self.has_printed = False <1>:<add> self.lib = None <del> self.lib = None <2>:<add> self.initialized = False <del> self.initialized = False
# module: bitsandbytes.cuda_setup.main class CUDASetup: def initialize(self): <0> self.has_printed = False <1> self.lib = None <2> self.initialized = False <3>
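The guard added in this record makes initialize() a no-op after the first call on the singleton. A tiny sketch of the same singleton-plus-guard pattern in isolation (illustrative class, not the library's):

class LazySetup:
    _instance = None

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls.__new__(cls)
            cls._instance.initialize()
        return cls._instance

    def initialize(self):
        if not getattr(self, "initialized", False):
            self.lib = None           # expensive setup would go here
            self.initialized = False  # flipped to True once setup succeeds

assert LazySetup.get_instance() is LazySetup.get_instance()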
bitsandbytes.cuda_setup.main/CUDASetup.run_cuda_setup
Modified
bitsandbytes-foundation~bitsandbytes
0f5c3948709ae70cf733cefbd831aaea8a4e38c9
Added version 0.37.0.
<18>:<add> if not binary_path.exists() or torch.cuda.is_available(): <del> if not binary_path.exists(): <27>:<add> self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
# module: bitsandbytes.cuda_setup.main class CUDASetup: def run_cuda_setup(self): <0> self.initialized = True <1> self.cuda_setup_log = [] <2> <3> binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup() <4> self.cudart_path = cudart_path <5> self.cuda = cuda <6> self.cc = cc <7> self.cuda_version_string = cuda_version_string <8> <9> package_dir = Path(__file__).parent.parent <10> binary_path = package_dir / binary_name <11> <12> try: <13> if not binary_path.exists(): <14> self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?") <15> legacy_binary_name = "libbitsandbytes_cpu.so" <16> self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...") <17> binary_path = package_dir / legacy_binary_name <18> if not binary_path.exists(): <19> self.add_log_entry('') <20> self.add_log_entry('='*48 + 'ERROR' + '='*37) <21> self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:') <22> self.add_log_entry('1. CUDA driver not installed') <23> self.add_log_entry('2. CUDA not installed') <24> self.add_log_entry('3. You have multiple conflicting CUDA libraries') <25> self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!') <26> self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.') <27> self.add_log_entry('='*</s>
===========below chunk 0=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def run_cuda_setup(self): # offset: 1 self.add_log_entry('') self.generate_instructions() self.print_log_stack() raise Exception('CUDA SETUP: Setup Failed!') self.lib = ct.cdll.LoadLibrary(binary_path) else: self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...") self.lib = ct.cdll.LoadLibrary(binary_path) except Exception as ex: self.add_log_entry(str(ex)) self.print_log_stack() ===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def initialize(self): + if not getattr(self, 'initialized', False): + self.has_printed = False - self.has_printed = False + self.lib = None - self.lib = None + self.initialized = False - self.initialized = False
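A hedged sketch of the load-with-fallback flow in run_cuda_setup above: prefer a version-specific binary, fall back to the CPU-only build, and fail loudly only if neither exists. The helper name and error text below are illustrative, not the library's API:

import ctypes as ct
from pathlib import Path

def load_native_library(package_dir: Path, preferred_name: str,
                        fallback_name: str = "libbitsandbytes_cpu.so"):
    binary_path = package_dir / preferred_name
    if not binary_path.exists():
        binary_path = package_dir / fallback_name
    if not binary_path.exists():
        raise RuntimeError(f"no bitsandbytes binary found in {package_dir}")
    return ct.cdll.LoadLibrary(str(binary_path))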
bitsandbytes.cuda_setup.main/is_cublasLt_compatible
Modified
bitsandbytes-foundation~bitsandbytes
0f5c3948709ae70cf733cefbd831aaea8a4e38c9
Added version 0.37.0.
<4>:<add> cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True) <del> cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True)
# module: bitsandbytes.cuda_setup.main def is_cublasLt_compatible(cc): <0> has_cublaslt = False <1> if cc is not None: <2> cc_major, cc_minor = cc.split('.') <3> if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): <4> cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True) <5> else: <6> has_cublaslt = True <7> return has_cublaslt <8>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def initialize(self): + if not getattr(self, 'initialized', False): + self.has_printed = False - self.has_printed = False + self.lib = None - self.lib = None + self.initialized = False - self.initialized = False ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def run_cuda_setup(self): self.initialized = True self.cuda_setup_log = [] binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup() self.cudart_path = cudart_path self.cuda = cuda self.cc = cc self.cuda_version_string = cuda_version_string package_dir = Path(__file__).parent.parent binary_path = package_dir / binary_name try: if not binary_path.exists(): self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?") legacy_binary_name = "libbitsandbytes_cpu.so" self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...") binary_path = package_dir / legacy_binary_name + if not binary_path.exists() or torch.cuda.is_available(): - if not binary_path.exists(): self.add_log_entry('') self.add_log_entry('='*48 + 'ERROR' + '='*37) self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:') self.add_log_entry('1. CUDA driver not installed') self.add_log_entry('2. CUDA not installed') self.add_log_entry('3. You have multiple conflicting CUDA libraries') self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!') self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.') + self.add_log_entry('CUDA SETUP: The CU</s> ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def run_cuda_setup(self): # offset: 1 <s>, `make CUDA_VERSION=113`.') + self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.') self.add_log_entry('='*80) self.add_log_entry('') self.generate_instructions() self.print_log_stack() raise Exception('CUDA SETUP: Setup Failed!') self.lib = ct.cdll.LoadLibrary(binary_path) else: self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...") self.lib = ct.cdll.LoadLibrary(binary_path) except Exception as ex: self.add_log_entry(str(ex)) self.print_log_stack()
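The compute-capability check above compares major/minor components numerically; written as a pure function over (major, minor) tuples it stays testable without a GPU. Sketch, assuming cc strings look like "7.5":

from typing import Optional

def supports_cublaslt(cc: Optional[str]) -> bool:
    if cc is None:
        return False
    major, minor = (int(part) for part in cc.split("."))
    return (major, minor) >= (7, 5)

assert supports_cublaslt("8.0")
assert not supports_cublaslt("7.0")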
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
Modified
bitsandbytes-foundation~bitsandbytes
0f5c3948709ae70cf733cefbd831aaea8a4e38c9
Added version 0.37.0.
<4>:<del> print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')
# module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): <0> if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0': <1> print('') <2> print('='*35 + 'BUG REPORT' + '='*35) <3> print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') <4> print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') <5> print('='*80) <6> if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None <7> <8> cuda_setup = CUDASetup.get_instance() <9> cudart_path = determine_cuda_runtime_lib_path() <10> cuda = get_cuda_lib_handle() <11> cc = get_compute_capability(cuda) <12> cuda_version_string = get_cuda_version(cuda, cudart_path) <13> <14> failure = False <15> if cudart_path is None: <16> failure = True <17> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True) <18> else: <19> cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart_path}") <20> <21> if cc == '' or cc is None: <22> failure = True <23> cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only</s>
===========below chunk 0=========== # module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): # offset: 1 else: cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}") if cuda is None: failure = True else: cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}') # 7.5 is the minimum CC vor cublaslt has_cublaslt = is_cublasLt_compatible(cc) # TODO: # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible) # (2) Multiple CUDA versions installed # we use ls -l instead of nvcc to determine the cuda version # since most installations will have the libcudart.so installed, but not the compiler if failure: binary_name = "libbitsandbytes_cpu.so" elif has_cublaslt: binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so" else: "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so" binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so" return binary_name, cudart_path, cuda, cc, cuda_version_string ===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def initialize(self): + if not getattr(self, 'initialized', False): + self.has_printed = False - self.has_printed = False + self.lib = None - self.lib = None + self.initialized = False - self.initialized = False ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main def is_cublasLt_compatible(cc): has_cublaslt = False if cc is not None: cc_major, cc_minor = cc.split('.') if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True) - cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Proceeding to load CPU-only library...", is_warning=True) else: has_cublaslt = True return has_cublaslt ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def run_cuda_setup(self): self.initialized = True self.cuda_setup_log = [] binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup() self.cudart_path = cudart_path self.cuda = cuda self.cc = cc self.cuda_version_string = cuda_version_string package_dir = Path(__file__).parent.parent binary_path = package_dir / binary_name try: if not binary_path.exists(): self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?") legacy_binary_name = "libbitsandbytes_cpu.so" self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...") binary_path = package_dir / legacy_binary_name + if not binary_path.exists() or torch.cuda.is_available(): - if not binary_path.exists(): self.add_log_entry('') self.add_log_entry('='*48 + 'ERROR' + '='*37) self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:') self.add_log_entry('1. CUDA driver not installed') self.add_log_entry('2. CUDA not installed') self.add_log_entry('3. You have multiple conflicting CUDA libraries') self.add_log_entry('4. 
Required library not pre-compiled for this bitsandbytes release!') self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.') + self.add_log_entry('CUDA SETUP: The CU</s> ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.main class CUDASetup: def run_cuda_setup(self): # offset: 1 <s>, `make CUDA_VERSION=113`.') + self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.') self.add_log_entry('='*80) self.add_log_entry('') self.generate_instructions() self.print_log_stack() raise Exception('CUDA SETUP: Setup Failed!') self.lib = ct.cdll.LoadLibrary(binary_path) else: self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...") self.lib = ct.cdll.LoadLibrary(binary_path) except Exception as ex: self.add_log_entry(str(ex)) self.print_log_stack()
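The tail of evaluate_cuda_setup reduces to a small naming rule for the binary to load. A sketch of just that rule, using the same strings as the code above:

def pick_binary_name(failure: bool, has_cublaslt: bool, cuda_version_string: str) -> str:
    if failure:
        return "libbitsandbytes_cpu.so"
    suffix = "" if has_cublaslt else "_nocublaslt"
    return f"libbitsandbytes_cuda{cuda_version_string}{suffix}.so"

assert pick_binary_name(False, True, "117") == "libbitsandbytes_cuda117.so"
assert pick_binary_name(False, False, "117") == "libbitsandbytes_cuda117_nocublaslt.so"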
tests.test_linear8bitlt/test_linear_no_igemmlt
Modified
bitsandbytes-foundation~bitsandbytes
58b09ee1b11c26344aaeee7ff648655f1d127202
[WIP] Implement proper serialization of Linear8bitLt
<15>:<add> linear_custom = linear_custom.cuda() <del> linear = linear_custom.cuda()
# module: tests.test_linear8bitlt @pytest.mark.skipif(not torch.cuda.is_available(), reason="this test requires a GPU") def test_linear_no_igemmlt(): <0> linear = torch.nn.Linear(1024, 3072) <1> x = torch.randn(3, 1024, dtype=torch.half) <2> linear_custom = Linear8bitLt( <3> linear.in_features, <4> linear.out_features, <5> linear.bias is not None, <6> has_fp16_weights=False, <7> threshold=6.0, <8> ) <9> linear_custom.state.force_no_igemmlt = True <10> <11> linear_custom.weight = bnb.nn.Int8Params( <12> linear.weight.data.clone(), requires_grad=False, has_fp16_weights=False <13> ).to(linear.weight.dtype) <14> linear_custom.bias = linear.bias <15> linear = linear_custom.cuda() <16> linear = linear.half().cuda() <17> <18> x_ref = x.clone().cuda().requires_grad_(True) <19> x_ours = x.clone().cuda().requires_grad_(True) <20> fx_ref = linear(x_ref).float() <21> grad_proj = torch.randn_like(fx_ref) <22> (fx_ref * grad_proj).mean().backward() <23> <24> fx_ours = linear_custom(x_ours).float() <25> (fx_ours * grad_proj).mean().backward() <26> assert torch.allclose(fx_ref, fx_ours, atol=0.02) <27> assert torch.allclose(x_ref.grad, x_ours.grad, atol=0.01) <28> assert not linear_custom.state.has_fp16_weights <29> assert linear_custom.state.CB is not None <30> assert linear_custom.state.CxB is None <31>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skip: _SkipMarkDecorator skipif: _SkipifMarkDecorator xfail: _XfailMarkDecorator parametrize: _ParametrizeMarkDecorator usefixtures: _UsefixturesMarkDecorator filterwarnings: _FilterwarningsMarkDecorator at: bitsandbytes.autograd._functions.MatmulLtState tile_indices: Optional[torch.Tensor] = None force_no_igemmlt: bool = False CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str() at: bitsandbytes.nn.modules Int8Params(data: Tensor=..., requires_grad: builtins.bool=...) Linear8bitLt(input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None) at: bitsandbytes.nn.modules.Int8Params to(device: Optional[Union[int, device]]=..., dtype: Optional[Union[dtype, str]]=..., non_blocking: bool=...) -> T to(tensor: Tensor, non_blocking: bool=...) -> T to(dtype: Union[dtype, str], non_blocking: bool=...) -> T at: bitsandbytes.nn.modules.Linear8bitLt.__init__ self.state = bnb.MatmulLtState() ===========unchanged ref 1=========== self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) at: tests.test_linear8bitlt.test_layout_exact_match x = (torch.randn(14336 * 3, 14336) * 10).to(torch.int8).cuda() restored_x = undo_layout(cxb, tile_indices) at: torch._C half: dtype = ... at: torch._C._VariableFunctions all(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor all(input: Tensor, dim: _int, keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool=False, *, out: Optional[Tensor]=None) -> Tensor allclose(input: Tensor, other: Tensor, rtol: _float=1e-05, atol: _float=1e-08, equal_nan: _bool=False) -> _bool eq(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor eq(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor ===========unchanged ref 2=========== randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, 
layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device:</s> ===========unchanged ref 3=========== randn_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch._tensor.Tensor.__setstate__ self.data = state[0] at: torch.cuda is_available() -> bool at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) at: torch.nn.modules.linear.Linear.__init__ self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs)) self.bias = Parameter(torch.empty(out_features, **factory_kwargs)) at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable]
tests.test_linear8bitlt/test_linear_serialization
Modified
bitsandbytes-foundation~bitsandbytes
ac3ab281e39cbc514ebef08823482d5b0cba42c1
Handle more cases in test_linear_serialization
<0>:<add> linear = torch.nn.Linear(32, 96) <del> linear = torch.nn.Linear(16, 32) <1>:<add> x = torch.randn(3, 32, dtype=torch.half) <del> x = torch.randn(3, 16, dtype=torch.half) <10>:<del> linear_custom.state.force_no_igemmlt = True <12>:<add> linear.weight.data.clone(), requires_grad=has_fp16_weights, has_fp16_weights=has_fp16_weights <del> linear.weight.data.clone(), requires_grad=False, has_fp16_weights=has_fp16_weights <13>:<add> ) <del> ).to(linear.weight.dtype) <16>:<add> <add> if serialize_before_forward: <add> state_dict_8bit = linear_custom.state_dict() <22>:<add> if not serialize_before_forward: <add> state_dict_8bit = linear_custom.state_dict() <del> state_dict = deepcopy(linear_custom.state_dict()) <23>:<add> <add> with TemporaryDirectory() as tmpdir: <add> state_path_8bit = os.path.join(tmpdir, "state_8bit.pth") <add> state_path = os.path.join(tmpdir, "state.pth") <add> <add> torch.save(linear.state_dict(), state_path) <add> torch.save(state_dict_8bit,
<s>") + @pytest.mark.parametrize("has_fp16_weights, serialize_before_forward, deserialize_before_cuda", + list(product([False, True], [False, True], [False, True]))) + def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda): - @pytest.mark.parametrize("has_fp16_weights", [False, True]) - def test_linear_serialization(has_fp16_weights): <0> linear = torch.nn.Linear(16, 32) <1> x = torch.randn(3, 16, dtype=torch.half) <2> <3> linear_custom = Linear8bitLt( <4> linear.in_features, <5> linear.out_features, <6> linear.bias is not None, <7> has_fp16_weights=has_fp16_weights, <8> threshold=6.0, <9> ) <10> linear_custom.state.force_no_igemmlt = True <11> linear_custom.weight = bnb.nn.Int8Params( <12> linear.weight.data.clone(), requires_grad=False, has_fp16_weights=has_fp16_weights <13> ).to(linear.weight.dtype) <14> linear_custom.bias = linear.bias <15> linear_custom = linear_custom.cuda() <16> <17> x_first = x.clone().cuda().requires_grad_(True) <18> fx_first = linear_custom(x_first).float() <19> grad_proj = torch.randn_like(fx_first) <20> (fx_first * grad_proj).mean().backward() <21> <22> state_dict = deepcopy(linear_custom.state_dict()) <23> <24> new_linear_custom = Linear8bitLt( <25> linear.in_features, <26> linear.out_features, <27> linear.bias is not None, <28> has_fp16_weights=has_fp16_weights, <29> threshold=6.0, <30> ) <31> </s>
===========below chunk 0=========== <s>test.mark.parametrize("has_fp16_weights, serialize_before_forward, deserialize_before_cuda", + list(product([False, True], [False, True], [False, True]))) + def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda): - @pytest.mark.parametrize("has_fp16_weights", [False, True]) - def test_linear_serialization(has_fp16_weights): # offset: 1 new_linear_custom = new_linear_custom.cuda() new_linear_custom.load_state_dict(state_dict, strict=True) x_second = x.clone().cuda().requires_grad_(True) fx_second = new_linear_custom(x_second).float() (fx_second * grad_proj).mean().backward() assert torch.allclose(fx_first, fx_second, atol=1e-5) assert torch.allclose(x_first.grad, x_second.grad, atol=1e-5) ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skip: _SkipMarkDecorator skipif: _SkipifMarkDecorator xfail: _XfailMarkDecorator parametrize: _ParametrizeMarkDecorator usefixtures: _UsefixturesMarkDecorator filterwarnings: _FilterwarningsMarkDecorator at: bitsandbytes.autograd._functions.MatmulLtState.reset_grads self.CxB = None at: bitsandbytes.nn.modules Int8Params(data: Tensor=..., requires_grad: builtins.bool=...) Linear8bitLt(input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None) at: bitsandbytes.nn.modules.Linear8bitLt.__init__ self.state = bnb.MatmulLtState() self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) ===========unchanged ref 1=========== at: itertools product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5], iter6: Iterable[_T6]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5, _T6]] product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3]) -> Iterator[Tuple[_T1, _T2, _T3]] product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4]) -> Iterator[Tuple[_T1, _T2, _T3, _T4]] product(iter1: Iterable[_T1], iter2: Iterable[_T2]) -> Iterator[Tuple[_T1, _T2]] product(*iterables: Iterable[_T1], repeat: int) -> Iterator[Tuple[_T1, ...]] product(iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]] product(*iterables: Iterable[Any], repeat: int=...) -> Iterator[Tuple[Any, ...]] product(iter1: Iterable[Any], iter2: Iterable[Any], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], iter7: Iterable[Any], *iterables: Iterable[Any]) -> Iterator[Tuple[Any, ...]] product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5]] at: os.path join(a: StrPath, *paths: StrPath) -> str join(a: BytesPath, *paths: BytesPath) -> bytes getsize(filename: AnyPath) -> int ===========unchanged ref 2=========== at: tempfile TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...) at: tests.test_linear8bitlt.test_linear_no_igemmlt linear_custom = linear_custom.cuda() at: torch._C half: dtype = ... 
===========unchanged ref 3=========== at: torch._C._VariableFunctions randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional</s>
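As a usage-level sketch of what this serialization test exercises (assumes a CUDA device and the constructor signature shown in the refs; error handling omitted): the module is quantized by moving it to the GPU, its state dict then round-trips, and loading an 8-bit checkpoint into a fresh module requires calling .cuda() before load_state_dict.

import bitsandbytes as bnb

layer = bnb.nn.Linear8bitLt(32, 96, has_fp16_weights=False, threshold=6.0).cuda()
state = layer.state_dict()                  # includes the extra "SCB" scale entry

restored = bnb.nn.Linear8bitLt(32, 96, has_fp16_weights=False, threshold=6.0)
restored = restored.cuda()                  # quantize first ...
restored.load_state_dict(state)             # ... then load the 8-bit weights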
bitsandbytes.nn.modules/Linear8bitLt._load_from_state_dict
Modified
bitsandbytes-foundation~bitsandbytes
cd4d904a4ccc80c444e460d3aef20705895d2051
Raise an error when loading a quantized checkpoint before quantization
<5>:<add> if self.weight.SCB is None: <add> # buffers not yet initialized, can't call them directly without <add> raise RuntimeError("Loading a quantized checkpoint into non-quantized Linear8bitLt is " <add> "not supported. Please call module.cuda() before module.load_state_dict()") <add>
# module: bitsandbytes.nn.modules class Linear8bitLt(nn.Linear): def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): <0> super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, <1> error_msgs) <2> for key in unexpected_keys: <3> input_name = key[len(prefix):] <4> if input_name == "SCB": <5> input_param = state_dict[key] <6> self.weight.SCB.copy_(input_param) <7> unexpected_keys.remove(key) <8>
===========unchanged ref 0=========== at: bitsandbytes.nn.modules.Int8Params.__new__ cls.SCB = None at: bitsandbytes.nn.modules.Linear8bitLt.__init__ self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl T_destination = TypeVar('T_destination', bound=Dict[str, Any]) ===========unchanged ref 1=========== state_dict(self, *, prefix: str=..., keep_vars: bool=...) -> Dict[str, Any] state_dict(self, *, destination: T_destination, prefix: str=..., keep_vars: bool=...) -> T_destination _load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
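The pattern in this record — write an extra tensor during saving, then reclaim it from unexpected_keys in _load_from_state_dict so strict loading still passes — generalizes to any module that keeps data outside its registered parameters. A self-contained toy version (the ScaledLinear class and its "scale" tensor are made up for illustration):

import torch
from torch import nn

class ScaledLinear(nn.Linear):
    def __init__(self, in_features, out_features):
        super().__init__(in_features, out_features)
        self.scale = torch.ones(out_features)      # neither a Parameter nor a buffer

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)
        destination[prefix + "scale"] = self.scale

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict,
                                      missing_keys, unexpected_keys, error_msgs)
        for key in list(unexpected_keys):
            if key[len(prefix):] == "scale":
                self.scale.copy_(state_dict[key])
                unexpected_keys.remove(key)         # consumed, so strict=True still passes

m = ScaledLinear(4, 8)
ScaledLinear(4, 8).load_state_dict(m.state_dict(), strict=True)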
bitsandbytes.nn.modules/Linear8bitLt._save_to_state_dict
Modified
bitsandbytes-foundation~bitsandbytes
cc608c04c292c906ff48223e386689c1c024f601
Revert the layout if weights were reordered
<0>:<add> if not self.state.has_fp16_weights and self.state.CB is None and self.state.CxB is not None: <add> # reorder weight layout back from ampere/turing to row <add> reorder_layout = True <add> weight_clone = self.weight.data.clone() <add> else: <add> reorder_layout = False <del> super()._save_to_state_dict(destination, prefix, keep_vars) <2>:<add> try: <add> if reorder_layout: <add> if self.state.tile_indices is None: <add> order, tile_size = self.state.formatB, self.state.get_tile_size() <add> transform = lambda x: \ <add> bitsandbytes.functional.transform(x.to(self.weight.data.device), from_order="row", <add> to_order=order)[0].to(x.device) <add> with torch.no_grad(): <add> self.state.tile_indices = get_inverse_transform_indices(transform, tile_size).to( <add> self.state.CxB.device) <del> # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data <3>:<del> weight_name = "SCB" <5>:<del> # case 1: .cuda was called, SCB is in self.weight
# module: bitsandbytes.nn.modules class Linear8bitLt(nn.Linear): def _save_to_state_dict(self, destination, prefix, keep_vars): <0> super()._save_to_state_dict(destination, prefix, keep_vars) <1> <2> # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data <3> weight_name = "SCB" <4> <5> # case 1: .cuda was called, SCB is in self.weight <6> param_from_weight = getattr(self.weight, weight_name) <7> # case 2: self.init_8bit_state was called, SCB is in self.state <8> param_from_state = getattr(self.state, weight_name) <9> <10> key_name = prefix + f"{weight_name}" <11> if param_from_weight is not None: <12> destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach() <13> elif not self.state.has_fp16_weights and param_from_state is not None: <14> destination[key_name] = param_from_state if keep_vars else param_from_state.detach() <15>
===========unchanged ref 0=========== at: bitsandbytes.autograd._functions.MatmulLtState tile_indices: Optional[torch.Tensor] = None force_no_igemmlt: bool = False CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str() get_tile_size() at: bitsandbytes.autograd._functions.MatmulLtState.reset_grads self.CB = None self.CxB = None at: bitsandbytes.functional transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None) at: bitsandbytes.nn.modules Int8Params(data: Tensor=..., requires_grad: builtins.bool=...) at: bitsandbytes.nn.modules.Int8Params.cuda self.data = CB at: bitsandbytes.nn.modules.Linear8bitLt.__init__ self.state = bnb.MatmulLtState() at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] _is_full_backward_hook: Optional[bool] ===========unchanged ref 1=========== _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl _save_to_state_dict(self, destination, prefix, keep_vars) T_destination = TypeVar('T_destination', bound=Dict[str, Any])
bitsandbytes.autograd._functions/MatMul8bitLt.backward
Modified
bitsandbytes-foundation~bitsandbytes
d15822a54b5a41e7b35c233616eb77cea337a06c
Refactor _tile_indices into a cached property, fix device bug
# module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): @staticmethod def backward(ctx, grad_output): <0> if ctx.is_empty: <1> bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias) <2> return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None <3> req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad <4> CAt, subA = ctx.tensors <5> SCAt, idx = ctx.tensor_states <6> formatB = ctx.formatB <7> state = ctx.state <8> grad_A = grad_B = grad_bias = None <9> <10> if req_gradBias: <11> # compute grad_bias first before changing grad_output dtype <12> grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias) <13> <14> # Cast grad_output to fp16 <15> if len(grad_output.shape) == 3: <16> grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous() <17> <18> Cgrad, Cgradt, SCgrad, SCgradt, coo_tensor = F.double_quant(grad_output.to(torch.float16)) <19> if req_gradB: <20> CxAt, SAt = F.transform(CAt, formatB, transpose=True) <21> C32grad, Sgrad = F.transform(Cgradt, "col32", transpose=True) <22> gradB32, SgradB32 = F.igemmlt(C32grad, CxAt, Sgrad, SAt) <23> grad_B = F.mm_dequant(gradB32, SgradB32, SCgradt, SCAt) <24> if state.threshold > 0.0 and subA is not None: <25> grad_B[:, idx] += torch.matmul(grad_output.t(), subA)</s>
===========below chunk 0=========== # module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): @staticmethod def backward(ctx, grad_output): # offset: 1 if req_gradA: if state.CBt is not None: C32grad, Sgrad = F.transform(Cgrad, "col32") if state.CxBt is None: state.CxBt, state.SBt = F.transform(state.CBt, to_order=formatB, transpose=True) gradA32, SgradA32 = F.igemmlt(C32grad, state.CxBt, Sgrad, state.SBt) grad_A = F.mm_dequant(gradA32, SgradA32, SCgrad, state.SCBt).view(ctx.grad_shape).to(ctx.dtype_A) elif state.CB is not None: CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0)) grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A) elif state.CxB is not None: if state.tile_indices is None: order, tile_size = state.formatB, state.get_tile_size() transform = lambda x: F.transform(x.cuda(), from_order="row", to_order=order)[0].to(x.device) with torch.no_grad(): state.tile_indices = get_inverse_transform_indices(transform, tile_size).to(state.CxB.device) CB = ( undo_layout(state.CxB, state.tile_indices) .to(ctx.dtype_A) .mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0)) ) grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).</s> ===========below chunk 1=========== # module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): @staticmethod def backward(ctx, grad_output): # offset: 2 <s>0)) ) grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A) else: raise Exception("State must contain either CBt or CB or CxB matrix for backward") return grad_A, grad_B, None, grad_bias, None ===========unchanged ref 0=========== at: bitsandbytes.autograd._functions undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor at: bitsandbytes.autograd._functions.MatMul8bitLt.forward CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A.to(torch.float16), threshold=state.threshold) output_shape = (input_shape[0], shapeB[0]) output_shape = (input_shape[0], input_shape[1], shapeB[0]) output = output.to(A.dtype) output = output.mul_(state.SCB.unsqueeze(0).mul(1.0 / 127.0)) output += torch.matmul(subA, state.subB) output = torch.nn.functional.linear(A_wo_outliers, state.CB.to(A.dtype)) output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=bias) at: bitsandbytes.autograd._functions.MatmulLtState _tile_indices: Optional[torch.Tensor] = None force_no_igemmlt: bool = False CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str() at: bitsandbytes.functional igemmlt(A, B, SA, SB, out=None, Sout=None, dtype=torch.int32) ===========unchanged ref 1=========== mm_dequant(A, quant_state, row_stats, col_stats, out=None, new_row_stats=None, new_col_stats=None, bias=None) double_quant(A, col_stats=None, row_stats=None, out_col=None, out_row=None, threshold=0.0) transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None) at: torch._C float16: dtype = ... 
at: torch._C._VariableFunctions clone(input: Tensor, *, memory_format: Optional[memory_format]=None) -> Tensor matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor]=None) -> Tensor zeros_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.autograd.function._SingleLevelFunction backward(ctx: Any, *grad_outputs: Any) -> Any vjp = backward ===========changed ref 0=========== # module: bitsandbytes.autograd._functions @dataclass class MatmulLtState: + @property + def tile_indices(self): + if self._tile_indices is None: + device = self.CxB.device + transform = lambda x: F.transform(x.to(device), from_order="row", to_order=self.formatB)[0].to(x.device) + with torch.no_grad(): + self._tile_indices = get_inverse_transform_indices(transform, self.get_tile_size()).to(device) + return self._tile_indices + ===========changed ref 1=========== # module: bitsandbytes.autograd._functions @dataclass class MatmulLtState: + _tile_indices: Optional[torch.Tensor] = None - tile_indices: Optional[torch.Tensor] = None force_no_igemmlt: bool = False CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str()
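The refactor in this record turns tile_indices into a compute-once property bound to the device of CxB. The same lazy-cache shape in isolation (the index computation below is a placeholder, not the real inverse transform):

from dataclasses import dataclass
from typing import Optional
import torch

@dataclass
class TileIndexCache:
    _tile_indices: Optional[torch.Tensor] = None

    @property
    def tile_indices(self) -> torch.Tensor:
        if self._tile_indices is None:
            # placeholder for the real inverse-transform computation
            self._tile_indices = torch.arange(8 * 32).view(8, 32)
        return self._tile_indices

cache = TileIndexCache()
assert cache.tile_indices is cache.tile_indices   # computed once, reused afterwards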
bitsandbytes.nn.modules/Linear8bitLt._save_to_state_dict
Modified
bitsandbytes-foundation~bitsandbytes
d15822a54b5a41e7b35c233616eb77cea337a06c
Refactor _tile_indices into a cached property, fix device bug
<9>:<del> if self.state.tile_indices is None: <10>:<del> order, tile_size = self.state.formatB, self.state.get_tile_size() <11>:<del> transform = lambda x: \ <12>:<del> bitsandbytes.functional.transform(x.to(self.weight.data.device), from_order="row", <13>:<del> to_order=order)[0].to(x.device) <14>:<del> with torch.no_grad(): <15>:<del> self.state.tile_indices = get_inverse_transform_indices(transform, tile_size).to( <16>:<del> self.state.CxB.device) <17>:<del> <18>:<del> CB = ( <19>:<add> self.weight.data = undo_layout(self.state.CxB, self.state.tile_indices) <del> undo_layout(self.state.CxB, self.state.tile_indices) <20>:<del> ) <21>:<del> <22>:<del> self.weight.data = CB
# module: bitsandbytes.nn.modules class Linear8bitLt(nn.Linear): def _save_to_state_dict(self, destination, prefix, keep_vars): <0> if not self.state.has_fp16_weights and self.state.CB is None and self.state.CxB is not None: <1> # reorder weight layout back from ampere/turing to row <2> reorder_layout = True <3> weight_clone = self.weight.data.clone() <4> else: <5> reorder_layout = False <6> <7> try: <8> if reorder_layout: <9> if self.state.tile_indices is None: <10> order, tile_size = self.state.formatB, self.state.get_tile_size() <11> transform = lambda x: \ <12> bitsandbytes.functional.transform(x.to(self.weight.data.device), from_order="row", <13> to_order=order)[0].to(x.device) <14> with torch.no_grad(): <15> self.state.tile_indices = get_inverse_transform_indices(transform, tile_size).to( <16> self.state.CxB.device) <17> <18> CB = ( <19> undo_layout(self.state.CxB, self.state.tile_indices) <20> ) <21> <22> self.weight.data = CB <23> <24> super()._save_to_state_dict(destination, prefix, keep_vars) <25> <26> # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data <27> weight_name = "SCB" <28> <29> # case 1: .cuda was called, SCB is in self.weight <30> param_from_weight = getattr(self.weight, weight_name) <31> # case 2: self.init_8bit_state was called, SCB is in self.state <32> param_from_state = getattr(self.state, weight_name) <33> <34> key_name = prefix + f"{weight_name}" <35> if param_from</s>
===========below chunk 0=========== # module: bitsandbytes.nn.modules class Linear8bitLt(nn.Linear): def _save_to_state_dict(self, destination, prefix, keep_vars): # offset: 1 destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach() elif not self.state.has_fp16_weights and param_from_state is not None: destination[key_name] = param_from_state if keep_vars else param_from_state.detach() finally: if reorder_layout: self.weight.data = weight_clone ===========unchanged ref 0=========== at: bitsandbytes.autograd._functions undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor at: bitsandbytes.autograd._functions.MatmulLtState _tile_indices: Optional[torch.Tensor] = None force_no_igemmlt: bool = False CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str() at: bitsandbytes.autograd._functions.MatmulLtState.reset_grads self.CB = None self.CxB = None at: bitsandbytes.nn.modules.Int8Params.__new__ cls.SCB = None at: bitsandbytes.nn.modules.Int8Params.cuda self.data = CB at: bitsandbytes.nn.modules.Linear8bitLt.__init__ self.state = bnb.MatmulLtState() self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] ===========unchanged ref 1=========== _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl _save_to_state_dict(self, destination, prefix, keep_vars) _save_to_state_dict(destination, prefix, keep_vars) T_destination = TypeVar('T_destination', bound=Dict[str, Any]) state_dict(self, *, prefix: str=..., keep_vars: bool=...) -> Dict[str, Any] state_dict(self, *, destination: T_destination, prefix: str=..., keep_vars: bool=...) 
-> T_destination _load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) ===========changed ref 0=========== # module: bitsandbytes.autograd._functions @dataclass class MatmulLtState: + @property + def tile_indices(self): + if self._tile_indices is None: + device = self.CxB.device + transform = lambda x: F.transform(x.to(device), from_order="row", to_order=self.formatB)[0].to(x.device) + with torch.no_grad(): + self._tile_indices = get_inverse_transform_indices(transform, self.get_tile_size()).to(device) + return self._tile_indices + ===========changed ref 1=========== # module: bitsandbytes.autograd._functions @dataclass class MatmulLtState: + _tile_indices: Optional[torch.Tensor] = None - tile_indices: Optional[torch.Tensor] = None force_no_igemmlt: bool = False CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str() ===========changed ref 2=========== # module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): @staticmethod def backward(ctx, grad_output): if ctx.is_empty: bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias) return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad CAt, subA = ctx.tensors SCAt, idx = ctx.tensor_states formatB = ctx.formatB state = ctx.state grad_A = grad_B = grad_bias = None if req_gradBias: # compute grad_bias first before changing grad_output dtype grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias) # Cast grad_output to fp16 if len(grad_output.shape) == 3: grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous() Cgrad, Cgradt, SCgrad, SCgradt, coo_tensor = F.double_quant(grad_output.to(torch.float16)) if req_gradB: CxAt, SAt = F.transform(CAt, formatB, transpose=True) C32grad, Sgrad = F.transform(Cgradt, "col32", transpose=True) gradB32, SgradB32 = F.igemmlt(C32grad, CxAt, Sgrad, SAt) grad_B = F.mm_dequant(gradB32, SgradB32, SCgradt, SCAt) if state.threshold > 0.0 and subA is not None: grad_B[:, idx] += torch.matmul(grad_output.t(), subA) if req_gradA: if state.CBt is not None: C32grad,</s>
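The try/finally shape in this record (temporarily rewrite weight.data, serialize, then restore the live layout no matter what) is the part worth remembering when adding similar hooks. A stripped-down sketch of just that control flow, with a hypothetical helper name:

import torch
from torch import nn

def save_with_temporary_weight(module: nn.Linear, replacement: torch.Tensor) -> dict:
    original = module.weight.data
    module.weight.data = replacement        # e.g. a de-permuted copy of the weight
    try:
        return {k: v.clone() for k, v in module.state_dict().items()}
    finally:
        module.weight.data = original       # always restore the live layout

layer = nn.Linear(4, 4)
before = layer.weight.data.clone()
state = save_with_temporary_weight(layer, torch.zeros(4, 4))
assert torch.equal(state["weight"], torch.zeros(4, 4))
assert torch.equal(layer.weight.data, before)      # original weight untouched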
tests.test_linear8bitlt/test_linear_serialization
Modified
bitsandbytes-foundation~bitsandbytes
dcecbb26cafc040052e78b09b1cbe06929a9b776
Add force_no_igemmlt to test params
<10>:<add> if force_no_igemmlt: <add> linear_custom.state.force_no_igemmlt = True <add>
<s>, deserialize_before_cuda", + list(product([False, True], [False, True], [False, True], [False, True]))) - list(product([False, True], [False, True], [False, True]))) + def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda, force_no_igemmlt): - def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda): <0> linear = torch.nn.Linear(32, 96) <1> x = torch.randn(3, 32, dtype=torch.half) <2> <3> linear_custom = Linear8bitLt( <4> linear.in_features, <5> linear.out_features, <6> linear.bias is not None, <7> has_fp16_weights=has_fp16_weights, <8> threshold=6.0, <9> ) <10> linear_custom.weight = bnb.nn.Int8Params( <11> linear.weight.data.clone(), requires_grad=has_fp16_weights, has_fp16_weights=has_fp16_weights <12> ) <13> linear_custom.bias = linear.bias <14> linear_custom = linear_custom.cuda() <15> <16> if serialize_before_forward: <17> state_dict_8bit = linear_custom.state_dict() <18> <19> x_first = x.clone().cuda().requires_grad_(True) <20> fx_first = linear_custom(x_first).float() <21> grad_proj = torch.randn_like(fx_first) <22> (fx_first * grad_proj).mean().backward() <23> <24> if not serialize_before_forward: <25> state_dict_8bit = linear_custom.state_dict() <26> <27> with TemporaryDirectory() as tmpdir: <28> state_path_8bit = os.path.join(tmpdir, "state_8bit.pth") <29> state_path = os.path.join(tmpdir, "state.</s>
===========below chunk 0=========== <s>cuda", + list(product([False, True], [False, True], [False, True], [False, True]))) - list(product([False, True], [False, True], [False, True]))) + def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda, force_no_igemmlt): - def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda): # offset: 1 torch.save(linear.state_dict(), state_path) torch.save(state_dict_8bit, state_path_8bit) if not has_fp16_weights: assert os.path.getsize(state_path_8bit) < 0.5 * os.path.getsize(state_path) new_state_dict = torch.load(state_path_8bit) new_linear_custom = Linear8bitLt( linear.in_features, linear.out_features, linear.bias is not None, has_fp16_weights=has_fp16_weights, threshold=6.0, ) if deserialize_before_cuda: with nullcontext() if has_fp16_weights else pytest.raises(RuntimeError): new_linear_custom.load_state_dict(new_state_dict, strict=True) new_linear_custom = new_linear_custom.cuda() if not deserialize_before_cuda: new_linear_custom.load_state_dict(new_state_dict, strict=True) x_second = x.clone().cuda().requires_grad_(True) fx_second = new_linear_custom(x_second).float() (fx_second * grad_proj).mean().backward() # if 8-bit weights were loaded before .cuda, state is incorrect anyway and RuntimeError was raised if has_fp16_weights or not deserialize_before_cuda: assert torch.allclose(</s> ===========below chunk 1=========== <s>cuda", + list(product([False, True], [False, True], [False, True], [False, True]))) - list(product([False, True], [False, True], [False, True]))) + def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda, force_no_igemmlt): - def test_linear_serialization(has_fp16_weights, serialize_before_forward, deserialize_before_cuda): # offset: 2 <s> and RuntimeError was raised if has_fp16_weights or not deserialize_before_cuda: assert torch.allclose(fx_first, fx_second, atol=1e-5) assert torch.allclose(x_first.grad, x_second.grad, atol=1e-5) ===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.mark.structures.MarkGenerator skip: _SkipMarkDecorator skipif: _SkipifMarkDecorator xfail: _XfailMarkDecorator parametrize: _ParametrizeMarkDecorator usefixtures: _UsefixturesMarkDecorator filterwarnings: _FilterwarningsMarkDecorator at: _pytest.python_api raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E] raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]" at: bitsandbytes.autograd._functions.MatmulLtState _tile_indices: Optional[torch.Tensor] = None force_no_igemmlt: bool = False CB = None CxB = None SB = None SCB = None CxBt = None SBt = None CBt = None subB = None outlier_pool = None has_accumulated_gradients = False threshold = 0.0 idx = None is_training = True has_fp16_weights = True memory_efficient_backward = False use_pool = False formatB = F.get_special_format_str() at: bitsandbytes.nn.modules Int8Params(data: Tensor=..., requires_grad: builtins.bool=...) 
Linear8bitLt(input_features, output_features, bias=True, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None) ===========unchanged ref 1=========== at: bitsandbytes.nn.modules.Linear8bitLt.__init__ self.state = bnb.MatmulLtState() self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) at: contextlib nullcontext(enter_result: _T) -> ContextManager[_T] nullcontext() -> ContextManager[None] ===========unchanged ref 2=========== at: itertools product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5], iter6: Iterable[_T6]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5, _T6]] product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3]) -> Iterator[Tuple[_T1, _T2, _T3]] product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4]) -> Iterator[Tuple[_T1, _T2, _T3, _T4]] product(iter1: Iterable[_T1], iter2: Iterable[_T2]) -> Iterator[Tuple[_T1, _T2]] product(*iterables: Iterable[_T1], repeat: int) -> Iterator[Tuple[_T1, ...]] product(iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]] product(*iterables: Iterable[Any], repeat: int=...) -> Iterator[Tuple[Any, ...]] product(iter1: Iterable[Any], iter2: Iterable[Any], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], iter7: Iterable[Any], *iterables: Iterable[Any]) -> Iterator[Tuple[Any, ...]] product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5]] at: os.path join(a: StrPath, *paths: StrPath) -> str join(a: BytesPath, *paths: BytesPath) -> bytes getsize(filename: AnyPath) -> int ===========unchanged ref 3=========== at: tempfile TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...) at: torch._C half: dtype = ...
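Side note on the parametrization style used in this test: itertools.product over boolean axes expands into one case per combination, so adding force_no_igemmlt doubles the test matrix.

from itertools import product

# has_fp16_weights x serialize_before_forward x deserialize_before_cuda x force_no_igemmlt
cases = list(product([False, True], repeat=4))
assert len(cases) == 16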
bitsandbytes.cuda_setup.env_vars/to_be_ignored
Modified
bitsandbytes-foundation~bitsandbytes
ed6f3eb146d67d6a2618f85227048be1ba82725a
Merge pull request #159 from TimDettmers/serialize_8bit
<8>:<add> "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
# module: bitsandbytes.cuda_setup.env_vars def to_be_ignored(env_var: str, value: str) -> bool: <0> ignorable = { <1> "PWD", # PWD: this is how the shell keeps track of the current working dir <2> "OLDPWD", <3> "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated <4> "SSH_TTY", <5> "HOME", # Linux shell default <6> "TMUX", # Terminal Multiplexer <7> "XDG_DATA_DIRS", # XDG: Desktop environment stuff <8> "XDG_RUNTIME_DIR", <9> "MAIL", # something related to emails <10> "SHELL", # binary for currently invoked shell <11> "DBUS_SESSION_BUS_ADDRESS", # hardware related <12> "PATH", # this is for finding binaries, not libraries <13> "LESSOPEN", # related to the `less` command <14> "LESSCLOSE", <15> "_", # current Python interpreter <16> } <17> return env_var in ignorable <18>
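A sketch of how an ignore set like this is typically consumed when scanning the environment for candidate library paths. The helper below is an assumption for illustration, not the library's API, and the set is abbreviated:

import os

IGNORABLE = {"PWD", "OLDPWD", "HOME", "SHELL", "PATH", "_"}   # abbreviated

def candidate_env_vars() -> dict:
    return {name: value for name, value in os.environ.items() if name not in IGNORABLE}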
bitsandbytes.cuda_setup.main/is_cublasLt_compatible
Modified
bitsandbytes-foundation~bitsandbytes
ed6f3eb146d67d6a2618f85227048be1ba82725a
Merge pull request #159 from TimDettmers/serialize_8bit
<4>:<add> CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True) <del> cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
# module: bitsandbytes.cuda_setup.main def is_cublasLt_compatible(cc): <0> has_cublaslt = False <1> if cc is not None: <2> cc_major, cc_minor = cc.split('.') <3> if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): <4> cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True) <5> else: <6> has_cublaslt = True <7> return has_cublaslt <8>
===========changed ref 0=========== # module: bitsandbytes.cuda_setup.env_vars def to_be_ignored(env_var: str, value: str) -> bool: ignorable = { "PWD", # PWD: this is how the shell keeps track of the current working dir "OLDPWD", "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated "SSH_TTY", "HOME", # Linux shell default "TMUX", # Terminal Multiplexer "XDG_DATA_DIRS", # XDG: Desktop environment stuff + "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff "XDG_RUNTIME_DIR", "MAIL", # something related to emails "SHELL", # binary for currently invoked shell "DBUS_SESSION_BUS_ADDRESS", # hardware related "PATH", # this is for finding binaries, not libraries "LESSOPEN", # related to the `less` command "LESSCLOSE", "_", # current Python interpreter } return env_var in ignorable
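===========sketch: compute capability threshold===========
The function above reduces to a version comparison on a string such as "7.5", plus a warning routed through the CUDASetup singleton (which is what this commit changes). A standalone restatement of just the threshold check, without the logging side effect:

from typing import Optional

def cc_supports_cublaslt(cc: Optional[str]) -> bool:
    """True when a compute capability string such as '7.5' is at least 7.5."""
    if cc is None:
        return False
    major, minor = (int(x) for x in cc.split("."))
    return (major, minor) >= (7, 5)

assert cc_supports_cublaslt("8.0")
assert cc_supports_cublaslt("7.5")
assert not cc_supports_cublaslt("7.0")
assert not cc_supports_cublaslt(None)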
bitsandbytes.cuda_setup.main/evaluate_cuda_setup
Modified
bitsandbytes-foundation~bitsandbytes
ed6f3eb146d67d6a2618f85227048be1ba82725a
Merge pull request #159 from TimDettmers/serialize_8bit
<5>:<add> if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None, None <del> if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None
# module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): <0> if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0': <1> print('') <2> print('='*35 + 'BUG REPORT' + '='*35) <3> print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') <4> print('='*80) <5> if not torch.cuda.is_available(): return 'libsbitsandbytes_cpu.so', None, None, None, None <6> <7> cuda_setup = CUDASetup.get_instance() <8> cudart_path = determine_cuda_runtime_lib_path() <9> cuda = get_cuda_lib_handle() <10> cc = get_compute_capability(cuda) <11> cuda_version_string = get_cuda_version(cuda, cudart_path) <12> <13> failure = False <14> if cudart_path is None: <15> failure = True <16> cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True) <17> else: <18> cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart_path}") <19> <20> if cc == '' or cc is None: <21> failure = True <22> cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library...", is_warning=True) <23> else: <24> cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}") <25> <26> if cuda is None: <27> failure = True <28> else: <29> cuda_setup.add_log_entry(f'CUDA</s>
===========below chunk 0=========== # module: bitsandbytes.cuda_setup.main def evaluate_cuda_setup(): # offset: 1 # 7.5 is the minimum CC vor cublaslt has_cublaslt = is_cublasLt_compatible(cc) # TODO: # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible) # (2) Multiple CUDA versions installed # we use ls -l instead of nvcc to determine the cuda version # since most installations will have the libcudart.so installed, but not the compiler if failure: binary_name = "libbitsandbytes_cpu.so" elif has_cublaslt: binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so" else: "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so" binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so" return binary_name, cudart_path, cuda, cc, cuda_version_string ===========changed ref 0=========== # module: bitsandbytes.cuda_setup.main def is_cublasLt_compatible(cc): has_cublaslt = False if cc is not None: cc_major, cc_minor = cc.split('.') if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True) - cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True) else: has_cublaslt = True return has_cublaslt ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.env_vars def to_be_ignored(env_var: str, value: str) -> bool: ignorable = { "PWD", # PWD: this is how the shell keeps track of the current working dir "OLDPWD", "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated "SSH_TTY", "HOME", # Linux shell default "TMUX", # Terminal Multiplexer "XDG_DATA_DIRS", # XDG: Desktop environment stuff + "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff "XDG_RUNTIME_DIR", "MAIL", # something related to emails "SHELL", # binary for currently invoked shell "DBUS_SESSION_BUS_ADDRESS", # hardware related "PATH", # this is for finding binaries, not libraries "LESSOPEN", # related to the `less` command "LESSCLOSE", "_", # current Python interpreter } return env_var in ignorable
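===========sketch: binary name selection===========
After all detection steps, evaluate_cuda_setup boils down to picking one of three shared-object names. The sketch below mirrors the selection branch shown in the chunk above; the version string "117" is only an example value.

def pick_binary_name(failure: bool, has_cublaslt: bool, cuda_version_string: str) -> str:
    """Mirror of the name-selection branch at the end of evaluate_cuda_setup."""
    if failure:            # no libcudart, no GPU, or no driver handle
        return "libbitsandbytes_cpu.so"
    if has_cublaslt:       # compute capability >= 7.5
        return f"libbitsandbytes_cuda{cuda_version_string}.so"
    return f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"

assert pick_binary_name(True, False, "117") == "libbitsandbytes_cpu.so"
assert pick_binary_name(False, True, "117") == "libbitsandbytes_cuda117.so"
assert pick_binary_name(False, False, "117") == "libbitsandbytes_cuda117_nocublaslt.so"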
bitsandbytes.autograd._functions/MatMul8bitLt.forward
Modified
bitsandbytes-foundation~bitsandbytes
ed6f3eb146d67d6a2618f85227048be1ba82725a
Merge pull request #159 from TimDettmers/serialize_8bit
<0>:<add> using_igemmlt = supports_igemmlt(A.device) and not state.force_no_igemmlt <del> using_igemmlt = torch.cuda.get_device_capability(device=A.device) >= (7, 5) and not state.force_no_igemmlt
# module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): # forward is the same, but we added the fallback for pre-turing GPUs # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") @staticmethod def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): <0> using_igemmlt = torch.cuda.get_device_capability(device=A.device) >= (7, 5) and not state.force_no_igemmlt <1> # default of pytorch behavior if inputs are empty <2> ctx.is_empty = False <3> if prod(A.shape) == 0: <4> ctx.is_empty = True <5> ctx.A = A <6> ctx.B = B <7> ctx.bias = bias <8> if A.shape[-1] == B.shape[0]: <9> return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device) <10> else: <11> return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device) <12> <13> # 1. Quantize A <14> # 2. Quantize B <15> # 3. Matmul <16> # 4. Mixed-precision decomposition matmul <17> # 5. Save state <18> formatB = state.formatB <19> input_shape = A.shape <20> if state.outlier_pool is None: <21> state.outlier_pool = GlobalOutlierPooler.get_instance() <22> <23> # Cast A to fp16 <24> if A.dtype != torch.float16: <25> warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization") <26> <27> # 1. Quantize A <28> if len(A.shape) == 3: <29> A = A.view(-1, A.</s>
===========below chunk 0=========== # module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): # forward is the same, but we added the fallback for pre-turing GPUs # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") @staticmethod def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): # offset: 1 CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A.to(torch.float16), threshold=state.threshold) if state.threshold > 0.0 and coo_tensorA is not None: if state.has_fp16_weights: idx = torch.unique(coo_tensorA.colidx).long() CA[:, idx] = 0 CAt[:, idx] = 0 subA = A[:, idx] state.subB = B[:, idx].t().contiguous() state.idx = idx else: if state.CxB is None and using_igemmlt: # B in in 8-bit row-major, we can transform it back to 16-bit to extract outlier dimensions # we also need to convert it to the turing/ampere format state.CxB, state.SB = F.transform(state.CB, to_order=formatB) else: if not state.has_fp16_weights and state.CxB is None and using_igemmlt: state.CxB, state.SB = F.transform(state.CB, to_order=formatB) subA = None # 2. Quantize B if state.has_fp16_weights: has_grad = True if (getattr(B, "grad", None) is not None) else False is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1) if is_transposed: B = B.contiguous() if (state.</s> ===========below chunk 1=========== # module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): # forward is the same, but we added the fallback for pre-turing GPUs # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") @staticmethod def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): # offset: 2 <s> == B.stride(1) if is_transposed: B = B.contiguous() if (state.is_training and not has_grad) or state.CxB is None: state.reset_grads() ( CB, state.CBt, state.SCB, state.SCBt, coo_tensorB, ) = F.double_quant(B.to(torch.float16)) if using_igemmlt: state.CxB, state.SB = F.transform(CB, to_order=formatB) else: state.CB = CB else: has_grad = False if coo_tensorA is not None and not state.has_fp16_weights: # extract outliers outlier_idx = torch.unique(coo_tensorA.colidx) state.idx = outlier_idx # state.outlier_pool.add_outliers(outlier_idx, A.shape[-1]) # if state.use_pool and state.outlier_pool.model_dim == A.shape[-1]: # # do not use pool for 2nd FFN layer # state.idx = state.outlier_pool.get_current_outlier_idx().to(A.device) # else: # state.idx = outlier_idx if state.CxB is not None: outliers = F.extract_outliers</s> ===========below chunk 2=========== # module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): # forward is the same, but we added the fallback for pre-turing GPUs # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") @staticmethod def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): # offset: 3 <s>.CxB, state.SB, state.idx.int()) else: outliers = state.CB[:, state.idx.long()].clone() state.subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype) CA[:, state.idx.long()] = 0 CAt[:, state.idx.long()] = 0 subA = A[:, state.idx.long()] shapeB = state.SB[0] if state.SB else B.shape if len(input_shape) == 3: output_shape = (input_shape[0], input_shape[1], shapeB[0]) else: output_shape = (input_shape[0], shapeB[0]) # 3. 
Matmul if using_igemmlt: C32A, SA = F.transform(CA, "col32") out32, Sout32 = F.igemmlt(C32A, state.CxB, SA, state.SB) if bias is None or bias.dtype == torch.float16: # we apply the fused bias here output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=bias) output = output.to(A.dtype) else: # apply bias separately output = F.mm_dequant(out32, Sout32, SCA, state.</s> ===========below chunk 3=========== # module: bitsandbytes.autograd._functions class MatMul8bitLt(torch.autograd.Function): # forward is the same, but we added the fallback for pre-turing GPUs # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") @staticmethod def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): # offset: 4 <s>, bias=None) output = output.to(A.dtype).add_(bias) else: A_wo_outliers = A.clone() if state.idx is not None: A_wo_outliers[:, state.idx.long()] = 0 output = torch.nn.functional.linear(A_wo_outliers, state.CB.to(A.dtype)) output = output.mul_(state.SCB.unsqueeze(0).mul(1.0 / 127.0)) if bias is not None: output = output.add_(bias) # 4. Mixed-precision decomposition matmul if coo_tensorA is not None and subA is not None: output += torch.matmul(subA, state.subB) # 5. Save state ctx.state = state ctx.formatB = formatB ctx.grad_shape = input_shape ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype if any(ctx.needs_input_grad[:2]): ctx.tensors = (CAt, subA) ctx.tensor_states = (SCAt, state.idx) else: ctx.tensors = [None, None] ctx.tensor_states = (None, None) ctx.save_for_backward(None, None) clone_func = torch.
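===========sketch: the no-igemmlt fallback matmul===========
When supports_igemmlt() returns False, the forward pass above falls back to dequantizing the int8 weight and running a regular matmul. The sketch below restates that path in isolation (outlier columns and the mixed-precision decomposition are omitted); it is a simplified illustration, not the library's code.

import torch

def fallback_linear(A, CB, SCB, bias=None):
    """CB holds int8 weight rows, SCB their per-row scales; CB * SCB / 127
    approximates the original fp16 weight, so a plain matmul suffices."""
    out = torch.nn.functional.linear(A, CB.to(A.dtype))
    out = out * (SCB.to(A.dtype).unsqueeze(0) / 127.0)
    if bias is not None:
        out = out + bias
    return out

A = torch.randn(4, 32)                                     # activations
CB = torch.randint(-127, 128, (16, 32), dtype=torch.int8)  # quantized weight
SCB = torch.rand(16) * 10 + 0.1                            # per-row scales
print(fallback_linear(A, CB, SCB).shape)                   # torch.Size([4, 16])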
bitsandbytes.functional/quantize_no_absmax
Modified
bitsandbytes-foundation~bitsandbytes
ed6f3eb146d67d6a2618f85227048be1ba82725a
Merge pull request #159 from TimDettmers/serialize_8bit
<20>:<add> prev_device = pre_call(A.device) <23>:<add> post_call(prev_device)
# module: bitsandbytes.functional def quantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor: <0> ''' <1> Quantizes input tensor to 8-bit. <2> <3> Quantizes the 32-bit input tensor `A` to the 8-bit output tensor <4> `out` using the quantization map `code`. <5> <6> Parameters <7> ---------- <8> A : torch.Tensor <9> The input tensor. <10> code : torch.Tensor <11> The quantization map. <12> out : torch.Tensor, optional <13> The output tensor. Needs to be of type byte. <14> <15> Returns <16> ------- <17> torch.Tensor: <18> Quantized 8-bit tensor. <19> ''' <20> if out is None: out = torch.zeros_like(A, dtype=torch.uint8) <21> is_on_gpu([A, out]) <22> lib.cquantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel())) <23> return out <24>
===========changed ref 0=========== # module: bitsandbytes.autograd._functions + def supports_igemmlt(device: torch.device) -> bool: + """check if this device supports the optimized int8 kernel""" + if torch.cuda.get_device_capability(device=device) < (7, 5): + return False + device_name = torch.cuda.get_device_name(device=device) + nvidia16_models = ('GTX 1630', 'GTX 1650', 'GTX 1660') # https://en.wikipedia.org/wiki/GeForce_16_series + if any(model_name in device_name for model_name in nvidia16_models): + return False # these devices are technically cuda 7.5-capable, but they lack tensor cores + return True + ===========changed ref 1=========== # module: bitsandbytes.cuda_setup.main def is_cublasLt_compatible(cc): has_cublaslt = False if cc is not None: cc_major, cc_minor = cc.split('.') if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True) - cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True) else: has_cublaslt = True return has_cublaslt ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.env_vars def to_be_ignored(env_var: str, value: str) -> bool: ignorable = { "PWD", # PWD: this is how the shell keeps track of the current working dir "OLDPWD", "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated "SSH_TTY", "HOME", # Linux shell default "TMUX", # Terminal Multiplexer "XDG_DATA_DIRS", # XDG: Desktop environment stuff + "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff "XDG_RUNTIME_DIR", "MAIL", # something related to emails "SHELL", # binary for currently invoked shell "DBUS_SESSION_BUS_ADDRESS", # hardware related "PATH", # this is for finding binaries, not libraries "LESSOPEN", # related to the `less` command "LESSCLOSE", "_", # current Python interpreter } return env_var in ignorable ===========changed ref 3=========== # module: bitsandbytes.functional name2qmap = {} if COMPILED_WITH_CUDA: """C FUNCTIONS FOR OPTIMIZERS""" str2optimizer32bit = {} str2optimizer32bit["adam"] = (lib.cadam32bit_g32, lib.cadam32bit_g16) str2optimizer32bit["momentum"] = ( lib.cmomentum32bit_g32, lib.cmomentum32bit_g16, ) str2optimizer32bit["rmsprop"] = ( lib.crmsprop32bit_g32, lib.crmsprop32bit_g16, + ) + str2optimizer32bit["lion"] = ( + lib.clion32bit_g32, + lib.clion32bit_g16, ) str2optimizer32bit["adagrad"] = ( lib.cadagrad32bit_g32, lib.cadagrad32bit_g16, ) str2optimizer32bit["lars"] = ( lib.cmomentum32bit_g32, lib.cmomentum32bit_g16, ) str2optimizer32bit["lamb"] = (lib.cadam32bit_g32, lib.cadam32bit_g16) str2optimizer8bit = {} str2optimizer8bit["adam"] = ( lib.cadam_static_8bit_g32, lib.cadam_static_8bit_g16, ) str2optimizer8bit["momentum"] = ( lib.cmomentum_static_8bit_g32, lib.cmomentum_static_8bit_g16, ) str2optimizer8bit["rmsprop"] = ( lib.crmsprop_static_8bit_g32, lib.crmsprop_static_8bit_g16, ) + str2optimizer8bit["lion"] = ( + lib.clion_static</s> ===========changed ref 4=========== # module: bitsandbytes.functional # offset: 1 <s>_g16, ) + str2optimizer8bit["lion"] = ( + lib.clion_static_8bit_g32, + lib.clion_static_8bit_g16, + ) str2optimizer8bit["lamb"] = ( lib.cadam_static_8bit_g32, lib.cadam_static_8bit_g16, ) str2optimizer8bit["lars"] = ( lib.cmomentum_static_8bit_g32, lib.cmomentum_static_8bit_g16, ) str2optimizer8bit_blockwise = {} str2optimizer8bit_blockwise["adam"] = ( lib.cadam_8bit_blockwise_fp32, 
lib.cadam_8bit_blockwise_fp16, ) str2optimizer8bit_blockwise["momentum"] = ( lib.cmomentum_8bit_blockwise_fp32, lib.cmomentum_8bit_blockwise_fp16, ) str2optimizer8bit_blockwise["rmsprop"] = ( lib.crmsprop_8bit_blockwise_fp32, lib.crmsprop_8bit_blockwise_fp16, ) + str2optimizer8bit_blockwise["lion"] = ( + lib.clion_8bit_blockwise_fp32, + lib.clion_8bit_blockwise_fp16, + ) str2optimizer8bit_blockwise["adagrad"] = ( lib.cadagrad_8bit_blockwise_fp32, lib.cadagrad_8bit_blockwise_fp16, )
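===========sketch: codebook quantization in plain PyTorch===========
The commit itself only wraps the C kernel in pre_call/post_call so the correct CUDA device is active. For intuition about what cquantize/cdequantize compute, here is a pure-PyTorch emulation of the codebook lookup (nearest entry of a 256-value code); this is an illustrative reference based on the docstring, not the fused kernel.

import torch

def quantize_no_absmax_ref(A, code):
    """Nearest-entry lookup into a 256-value codebook; returns uint8 indices."""
    idx = torch.argmin((A.reshape(-1, 1) - code.reshape(1, -1)).abs(), dim=1)
    return idx.to(torch.uint8).reshape(A.shape)

def dequantize_no_absmax_ref(Q, code):
    """Map uint8 indices back to codebook values."""
    return code[Q.long()]

code = torch.linspace(-1.0, 1.0, 256)        # a simple linear codebook
A = torch.randn(8).clamp_(-1.0, 1.0)
Q = quantize_no_absmax_ref(A, code)
print((A - dequantize_no_absmax_ref(Q, code)).abs().max())  # below half a code step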
bitsandbytes.functional/dequantize_no_absmax
Modified
bitsandbytes-foundation~bitsandbytes
ed6f3eb146d67d6a2618f85227048be1ba82725a
Merge pull request #159 from TimDettmers/serialize_8bit
<20>:<add> prev_device = pre_call(A.device) <23>:<add> post_call(prev_device)
# module: bitsandbytes.functional def dequantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor: <0> ''' <1> Dequantizes the 8-bit tensor to 32-bit. <2> <3> Dequantizes the 8-bit tensor `A` to the 32-bit tensor `out` via <4> the quantization map `code`. <5> <6> Parameters <7> ---------- <8> A : torch.Tensor <9> The 8-bit input tensor. <10> code : torch.Tensor <11> The quantization map. <12> out : torch.Tensor <13> The 32-bit output tensor. <14> <15> Returns <16> ------- <17> torch.Tensor: <18> 32-bit output tensor. <19> ''' <20> if out is None: out = torch.zeros_like(A, dtype=torch.float32) <21> is_on_gpu([code, A, out]) <22> lib.cdequantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel())) <23> return out <24>
===========changed ref 0=========== # module: bitsandbytes.functional def quantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor: ''' Quantizes input tensor to 8-bit. Quantizes the 32-bit input tensor `A` to the 8-bit output tensor `out` using the quantization map `code`. Parameters ---------- A : torch.Tensor The input tensor. code : torch.Tensor The quantization map. out : torch.Tensor, optional The output tensor. Needs to be of type byte. Returns ------- torch.Tensor: Quantized 8-bit tensor. ''' + prev_device = pre_call(A.device) if out is None: out = torch.zeros_like(A, dtype=torch.uint8) is_on_gpu([A, out]) lib.cquantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel())) + post_call(prev_device) return out ===========changed ref 1=========== # module: bitsandbytes.autograd._functions + def supports_igemmlt(device: torch.device) -> bool: + """check if this device supports the optimized int8 kernel""" + if torch.cuda.get_device_capability(device=device) < (7, 5): + return False + device_name = torch.cuda.get_device_name(device=device) + nvidia16_models = ('GTX 1630', 'GTX 1650', 'GTX 1660') # https://en.wikipedia.org/wiki/GeForce_16_series + if any(model_name in device_name for model_name in nvidia16_models): + return False # these devices are technically cuda 7.5-capable, but they lack tensor cores + return True + ===========changed ref 2=========== # module: bitsandbytes.cuda_setup.main def is_cublasLt_compatible(cc): has_cublaslt = False if cc is not None: cc_major, cc_minor = cc.split('.') if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5): + CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True) - cuda_setup.add_log_entry("WARNING: Compute capability < 7.5 detected! 
Only slow 8-bit matmul is supported for your GPU!", is_warning=True) else: has_cublaslt = True return has_cublaslt ===========changed ref 3=========== # module: bitsandbytes.cuda_setup.env_vars def to_be_ignored(env_var: str, value: str) -> bool: ignorable = { "PWD", # PWD: this is how the shell keeps track of the current working dir "OLDPWD", "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated "SSH_TTY", "HOME", # Linux shell default "TMUX", # Terminal Multiplexer "XDG_DATA_DIRS", # XDG: Desktop environment stuff + "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff "XDG_RUNTIME_DIR", "MAIL", # something related to emails "SHELL", # binary for currently invoked shell "DBUS_SESSION_BUS_ADDRESS", # hardware related "PATH", # this is for finding binaries, not libraries "LESSOPEN", # related to the `less` command "LESSCLOSE", "_", # current Python interpreter } return env_var in ignorable ===========changed ref 4=========== # module: bitsandbytes.functional name2qmap = {} if COMPILED_WITH_CUDA: """C FUNCTIONS FOR OPTIMIZERS""" str2optimizer32bit = {} str2optimizer32bit["adam"] = (lib.cadam32bit_g32, lib.cadam32bit_g16) str2optimizer32bit["momentum"] = ( lib.cmomentum32bit_g32, lib.cmomentum32bit_g16, ) str2optimizer32bit["rmsprop"] = ( lib.crmsprop32bit_g32, lib.crmsprop32bit_g16, + ) + str2optimizer32bit["lion"] = ( + lib.clion32bit_g32, + lib.clion32bit_g16, ) str2optimizer32bit["adagrad"] = ( lib.cadagrad32bit_g32, lib.cadagrad32bit_g16, ) str2optimizer32bit["lars"] = ( lib.cmomentum32bit_g32, lib.cmomentum32bit_g16, ) str2optimizer32bit["lamb"] = (lib.cadam32bit_g32, lib.cadam32bit_g16) str2optimizer8bit = {} str2optimizer8bit["adam"] = ( lib.cadam_static_8bit_g32, lib.cadam_static_8bit_g16, ) str2optimizer8bit["momentum"] = ( lib.cmomentum_static_8bit_g32, lib.cmomentum_static_8bit_g16, ) str2optimizer8bit["rmsprop"] = ( lib.crmsprop_static_8bit_g32, lib.crmsprop_static_8bit_g16, ) + str2optimizer8bit["lion"] = ( + lib.clion_static</s> ===========changed ref 5=========== # module: bitsandbytes.functional # offset: 1 <s>_g16, ) + str2optimizer8bit["lion"] = ( + lib.clion_static_8bit_g32, + lib.clion_static_8bit_g16, + ) str2optimizer8bit["lamb"] = ( lib.cadam_static_8bit_g32, lib.cadam_static_8bit_g16, ) str2optimizer8bit["lars"] = ( lib.cmomentum_static_8bit_g32, lib.cmomentum_static_8bit_g16, ) str2optimizer8bit_blockwise = {} str2optimizer8bit_blockwise["adam"] = ( lib.cadam_8bit_blockwise_fp32, lib.cadam_8bit_blockwise_fp16, ) str2optimizer8bit_blockwise["momentum"] = ( lib.cmomentum_8bit_blockwise_fp32, lib.cmomentum_8bit_blockwise_fp16, ) str2optimizer8bit_blockwise["rmsprop"] = ( lib.crmsprop_8bit_blockwise_fp32, lib.crmsprop_8bit_blockwise_fp16, ) + str2optimizer8bit_blockwise["lion"] = ( + lib.clion_8bit_blockwise_fp32, + lib.clion_8bit_blockwise_fp16, + ) str2optimizer8bit_blockwise["adagrad"] = ( lib.cadagrad_8bit_blockwise_fp32, lib.cadagrad_8bit_blockwise_fp16, )
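===========sketch: a device guard in the spirit of pre_call/post_call===========
Both quantize_no_absmax and dequantize_no_absmax now save and restore the active CUDA device around the kernel call. A hypothetical context-manager equivalent, using only public torch.cuda calls (this is not the library's pre_call/post_call implementation):

from contextlib import contextmanager

import torch

@contextmanager
def on_device(device):
    """Pin `device` as the active CUDA device for the duration of a call,
    then restore whichever device was active before (hypothetical helper)."""
    if device.type != "cuda":
        yield
        return
    prev = torch.cuda.current_device()
    torch.cuda.set_device(device)
    try:
        yield
    finally:
        torch.cuda.set_device(prev)

# intended usage (illustrative): with on_device(A.device): lib.cdequantize(...)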
tests.test_optim/test_optimizer8bit
Modified
bitsandbytes-foundation~bitsandbytes
792af5c8838568d47e6421fece9dcb7460b20adc
Fixed noisy tests for 8-bit Lion.
<29>:<add> # since Lion can have pretty noisy updates where things lie at the boundary <add> # allow up to 5 errors for Lion <add> assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=5) <del> torch.testing.assert_allclose(p1, p2.float(), atol=patol, rtol=prtol)
# module: tests.test_optim @pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names) def test_optimizer8bit(dim1, dim2, gtype, optim_name): <0> if dim1 == 1 and dim2 == 1: <1> return <2> p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 <3> p2 = p1.clone() <4> p1 = p1.float() <5> blocksize = 2048 <6> <7> torch_optimizer = str2optimizers[optim_name][0]([p1]) <8> bnb_optimizer = str2optimizers[optim_name][1]([p2]) <9> <10> if gtype == torch.float32: <11> atol, rtol = 3e-3, 1e-3 <12> patol, prtol = 1e-5, 1e-3 <13> <14> else: <15> atol, rtol = 3e-3, 1e-3 <16> patol, prtol = 1e-5, 1e-3 <17> <18> errors = [] <19> relerrors = [] <20> <21> for i in range(50): <22> g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01 <23> p1.grad = g.clone().float() <24> p2.grad = g.clone() <25> <26> bnb_optimizer.step() <27> torch_optimizer.step() <28> <29> torch.testing.assert_allclose(p1, p2.float(), atol=patol, rtol=prtol) <30> <31> dequant_states = [] <32> for name1, name2, qmap, max_val in str2statenames[optim_name]: <33> # print(bnb_optimizer.state[p2][max_val], name1) <34> if "blockwise" in optim_name: <35> s1 = F.dequantize_blockwise( <36> code=bnb_optimizer.state</s>
===========below chunk 0=========== # module: tests.test_optim @pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names) def test_optimizer8bit(dim1, dim2, gtype, optim_name): # offset: 1 absmax=bnb_optimizer.state[p2][max_val], A=bnb_optimizer.state[p2][name2], blocksize=blocksize, ) else: s1 = F.dequantize( code=bnb_optimizer.state[p2][qmap], absmax=bnb_optimizer.state[p2][max_val], A=bnb_optimizer.state[p2][name2], ) num_not_close = ( torch.isclose( torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol ) == 0 ) assert num_not_close.sum().item() < 20 dequant_states.append(s1.clone()) err = torch.abs(p1 - p2) relerr = err / torch.abs(p1) assert err.mean() < 0.0001 assert relerr.mean() < 0.001 errors.append(err.mean().item()) relerrors.append(relerr.mean().item()) if i % 10 == 0 and i > 0: for (name1, name2, qmap, max_val), s in zip( str2statenames[optim_name], dequant_states ): s1cpy = s.clone() raws1cpy = bnb_optimizer.state[p2][name2].clone() qmap1 = bnb_optimizer.state[p2][qmap].clone() path = get_temp_dir() torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt")) del bnb_optimizer bnb_optimizer = None bnb_optimizer = str2</s> ===========below chunk 1=========== # module: tests.test_optim @pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names) def test_optimizer8bit(dim1, dim2, gtype, optim_name): # offset: 2 <s> "opt.pt")) del bnb_optimizer bnb_optimizer = None bnb_optimizer = str2optimizers[optim_name][1]([p2]) bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt"))) rm_path(path) torch.testing.assert_allclose( raws1cpy, bnb_optimizer.state[p2][name2] ) torch.testing.assert_allclose( qmap1, bnb_optimizer.state[p2][qmap] ) if "blockwise" in optim_name: s1 = F.dequantize_blockwise( code=bnb_optimizer.state[p2][qmap], absmax=bnb_optimizer.state[p2][max_val], A=bnb_optimizer.state[p2][name2], blocksize=blocksize, ) else: s1 = F.dequantize( code=bnb_optimizer.state[p2][qmap], absmax=bnb_optimizer.state[p2][max_val], A=bnb_optimizer.state[p2][name2], ) torch.testing.assert_allclose(s1cpy, s1) num_not_close = ( torch.isclose( torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol, ) == 0 ) assert num_not_close.sum().item() < 20 torch.testing.</s> ===========below chunk 2=========== # module: tests.test_optim @pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names) def test_optimizer8bit(dim1, dim2, gtype, optim_name): # offset: 3 <s>allclose( p1, p2.float(), atol=patol, rtol=prtol ) # the parameters diverge quickly. Here we keep them close # together so we can test against the Adam error p1.data = p1.data.to(gtype).float() p2.copy_(p1.data) torch.testing.assert_allclose(p1.to(gtype), p2) for (name1, name2, qmap, max_val), s in zip( str2statenames[optim_name], dequant_states ): torch_optimizer.state[p1][name1].copy_(s.data) ===========changed ref 0=========== # module: tests.test_optim + def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0): + idx = torch.isclose(a, b, rtol, atol) + error_count = (idx == 0).sum().item() + if error_count > max_error_count: + print(f"Too many values not close: assert {sumval} < {count}") + torch.testing.assert_allclose(a, b, rtol, atol) +
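===========sketch: why Lion updates are noisy===========
The test now tolerates a handful of mismatched parameters because Lion's update is a sign() of the interpolated momentum: an element whose 8-bit state sits near a quantization boundary can flip sign and move by a full lr step relative to the fp32 reference. The reference update below is the commonly published Lion formulation, used only to illustrate the effect; it is not the bnb kernel.

import torch

def lion_step(p, g, m, lr=1e-4, beta1=0.9, beta2=0.99, weight_decay=0.0):
    """Reference Lion update: sign of the interpolated momentum, decoupled decay."""
    update = torch.sign(beta1 * m + (1 - beta1) * g)
    p = p - lr * (update + weight_decay * p)
    m = beta2 * m + (1 - beta2) * g
    return p, m

p, m = torch.zeros(3), torch.zeros(3)
g = torch.tensor([0.3, -0.2, 0.0])
p, m = lion_step(p, g, m)
print(p)  # each element moves by exactly +/- lr, or stays put where the sign is 0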
bitsandbytes.optim.optimizer/Optimizer1State.update_step
Modified
bitsandbytes-foundation~bitsandbytes
2eb310835668f854c169953814f1d3b16a44346b
Fixed bug where beta2 was not passed into Lion 32-bit.
<26>:<add> config['betas'][1], <del> 0.0,
# module: bitsandbytes.optim.optimizer class Optimizer1State(Optimizer8bit): @torch.no_grad() def update_step(self, group, p, gindex, pindex): <0> state = self.state[p] <1> grad = p.grad <2> <3> config = self.get_config(gindex, pindex, group) <4> <5> state["step"] += 1 <6> step = state["step"] <7> <8> if config["percentile_clipping"] < 100: <9> current_gnorm, clip_value, gnorm_scale = F.percentile_clipping( <10> grad, state["gnorm_vec"], step, config["percentile_clipping"] <11> ) <12> else: <13> gnorm_scale = 1.0 <14> <15> if state["state1"].dtype == torch.float: <16> F.optimizer_update_32bit( <17> self.optimizer_name, <18> grad, <19> p, <20> state["state1"], <21> config["betas"][0], <22> config["eps"], <23> step, <24> config["lr"], <25> None, <26> 0.0, <27> config["weight_decay"], <28> gnorm_scale, <29> state["unorm_vec"] if config["max_unorm"] > 0.0 else None, <30> max_unorm=config["max_unorm"], <31> skip_zeros=config["skip_zeros"], <32> ) <33> <34> elif state["state1"].dtype == torch.uint8 and not config["block_wise"]: <35> F.optimizer_update_8bit( <36> self.optimizer_name, <37> grad, <38> p, <39> state["state1"], <40> None, <41> config["betas"][0], <42> config["betas"][1], <43> config["eps"], <44> step, <45> config["lr"], <46> state["qmap1"], <47> None, <48> state["max1"], </s>
===========below chunk 0=========== # module: bitsandbytes.optim.optimizer class Optimizer1State(Optimizer8bit): @torch.no_grad() def update_step(self, group, p, gindex, pindex): # offset: 1 state["new_max1"], None, config["weight_decay"], gnorm_scale, state["unorm_vec"] if config["max_unorm"] > 0.0 else None, max_unorm=config["max_unorm"], ) state["max1"], state["new_max1"] = state["new_max1"], state["max1"] elif state["state1"].dtype == torch.uint8 and config["block_wise"]: F.optimizer_update_8bit_blockwise( self.optimizer_name, grad, p, state["state1"], None, config["betas"][0], config["betas"][1], config["eps"], step, config["lr"], state["qmap1"], None, state["absmax1"], None, config["weight_decay"], gnorm_scale=gnorm_scale, skip_zeros=config["skip_zeros"], ) ===========unchanged ref 0=========== at: bitsandbytes.functional optimizer_update_32bit(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, beta1: float, eps: float, step: int, lr: float, state2: Tensor=None, beta2: float=0.0, weight_decay: float=0.0, gnorm_scale: float=1.0, unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None optimizer_update_8bit(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, state2: Tensor, beta1: float, beta2: float, eps: float, step: int, lr: float, qmap1: Tensor, qmap2: Tensor, max1: Tensor, max2: Tensor, new_max1: Tensor, new_max2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0, unorm_vec: Tensor=None, max_unorm: float=0.0) -> None optimizer_update_8bit_blockwise(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, state2: Tensor, beta1: float, beta2: float, eps: float, step: int, lr: float, qmap1: Tensor, qmap2: Tensor, absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0, skip_zeros=False) -> None percentile_clipping(grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int=5) at: bitsandbytes.optim.optimizer.Optimizer1State.__init__ self.optimizer_name = optimizer_name at: bitsandbytes.optim.optimizer.Optimizer8bit get_config(gindex, pindex, group) update_step(self, group, p, gindex, pindex) at: torch._C float: dtype = ... uint8: dtype = ... ===========unchanged ref 1=========== at: torch.autograd.grad_mode no_grad() at: torch.optim.optimizer.Optimizer.__init__ self.state: DefaultDict[torch.Tensor, Any] = defaultdict(dict)
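===========sketch: what hard-coding beta2=0.0 breaks===========
For Lion, the value now passed as config['betas'][1] is the decay of the momentum EMA. With 0.0 the buffer degenerates to "last gradient seen", so all history is lost. A quick numeric illustration in plain torch (not the kernel itself):

import torch

def ema(grads, beta2):
    """Momentum buffer update m <- beta2*m + (1-beta2)*g, as Lion maintains it."""
    m = torch.tensor(0.0)
    for g in grads:
        m = beta2 * m + (1 - beta2) * g
    return m

grads = [torch.tensor(1.0)] * 9 + [torch.tensor(-1.0)]
print(ema(grads, 0.99))  # still positive: the buffer keeps its history
print(ema(grads, 0.0))   # tensor(-1.): the buffer is just the last gradient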
tests.test_optim/assert_most_approx_close
Modified
bitsandbytes-foundation~bitsandbytes
2eb310835668f854c169953814f1d3b16a44346b
Fixed bug where beta2 was not passed into Lion 32-bit.
<3>:<add> print(f"Too many values not close: assert {error_count} < {max_error_count}") <del> print(f"Too many values not close: assert {sumval} < {count}")
# module: tests.test_optim def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0): <0> idx = torch.isclose(a, b, rtol, atol) <1> error_count = (idx == 0).sum().item() <2> if error_count > max_error_count: <3> print(f"Too many values not close: assert {sumval} < {count}") <4> torch.testing.assert_allclose(a, b, rtol, atol) <5>
===========changed ref 0=========== # module: bitsandbytes.optim.optimizer class Optimizer1State(Optimizer8bit): @torch.no_grad() def update_step(self, group, p, gindex, pindex): state = self.state[p] grad = p.grad config = self.get_config(gindex, pindex, group) state["step"] += 1 step = state["step"] if config["percentile_clipping"] < 100: current_gnorm, clip_value, gnorm_scale = F.percentile_clipping( grad, state["gnorm_vec"], step, config["percentile_clipping"] ) else: gnorm_scale = 1.0 if state["state1"].dtype == torch.float: F.optimizer_update_32bit( self.optimizer_name, grad, p, state["state1"], config["betas"][0], config["eps"], step, config["lr"], None, + config['betas'][1], - 0.0, config["weight_decay"], gnorm_scale, state["unorm_vec"] if config["max_unorm"] > 0.0 else None, max_unorm=config["max_unorm"], skip_zeros=config["skip_zeros"], ) elif state["state1"].dtype == torch.uint8 and not config["block_wise"]: F.optimizer_update_8bit( self.optimizer_name, grad, p, state["state1"], None, config["betas"][0], config["betas"][1], config["eps"], step, config["lr"], state["qmap1"], None, state["max1"], None, state["new_max1"], None, config["weight_decay"], gnorm_scale, state["</s> ===========changed ref 1=========== # module: bitsandbytes.optim.optimizer class Optimizer1State(Optimizer8bit): @torch.no_grad() def update_step(self, group, p, gindex, pindex): # offset: 1 <s>new_max1"], None, config["weight_decay"], gnorm_scale, state["unorm_vec"] if config["max_unorm"] > 0.0 else None, max_unorm=config["max_unorm"], ) state["max1"], state["new_max1"] = state["new_max1"], state["max1"] elif state["state1"].dtype == torch.uint8 and config["block_wise"]: F.optimizer_update_8bit_blockwise( self.optimizer_name, grad, p, state["state1"], None, config["betas"][0], config["betas"][1], config["eps"], step, config["lr"], state["qmap1"], None, state["absmax1"], None, config["weight_decay"], gnorm_scale=gnorm_scale, skip_zeros=config["skip_zeros"], )
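===========sketch: how max_error_count gates the assertion===========
The fix above only corrects the f-string variable names. For completeness, this is how the mismatch count behind the helper is derived and how max_error_count decides whether the strict allclose fallback runs:

import torch

a = torch.zeros(100)
b = torch.zeros(100)
b[:3] = 1.0                                             # three deliberate outliers
mismatches = (~torch.isclose(a, b, rtol=1e-3, atol=1e-3)).sum().item()
print(mismatches)                                       # 3
# assert_most_approx_close(a, b, max_error_count=5) would pass;
# with max_error_count=2 it falls through to the strict assert_allclose and fails.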
tests.test_optim/test_optimizer32bit
Modified
bitsandbytes-foundation~bitsandbytes
2eb310835668f854c169953814f1d3b16a44346b
Fixed bug where beta2 was not passed into Lion 32-bit.
<22>:<add> <30>:<add> # since Lion can have pretty noisy updates where things lie at the boundary <add> # allow up to 10 errors for Lion <add> assert_most_approx_close(p1, p2.float(), atol, rtol, max_error_count=10) <del> torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol)
# module: tests.test_optim @pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names) def test_optimizer32bit(dim1, dim2, gtype, optim_name): <0> if dim1 == 1 and dim2 == 1: <1> return <2> p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 <3> p2 = p1.clone() <4> p1 = p1.float() <5> <6> torch_optimizer = str2optimizers[optim_name][0]([p1]) <7> bnb_optimizer = str2optimizers[optim_name][1]([p2]) <8> <9> if gtype == torch.float32: <10> atol, rtol = 1e-6, 1e-5 <11> else: <12> atol, rtol = 1e-4, 1e-3 <13> <14> for i in range(k): <15> g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01 <16> p1.grad = g.clone().float() <17> p2.grad = g.clone() <18> <19> bnb_optimizer.step() <20> torch_optimizer.step() <21> <22> for name1, name2 in str2statenames[optim_name]: <23> torch.testing.assert_allclose( <24> torch_optimizer.state[p1][name1], <25> bnb_optimizer.state[p2][name2], <26> atol=atol, <27> rtol=rtol, <28> ) <29> <30> torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol) <31> <32> if i % (k // 5) == 0 and i > 0: <33> path = get_temp_dir() <34> torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt")) <35> del bnb_optimizer <36> bnb_optimizer</s>
===========below chunk 0=========== # module: tests.test_optim @pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names) def test_optimizer32bit(dim1, dim2, gtype, optim_name): # offset: 1 bnb_optimizer = str2optimizers[optim_name][1]([p2]) bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt"))) rm_path(path) torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol) for name1, name2 in str2statenames[optim_name]: torch.testing.assert_allclose( torch_optimizer.state[p1][name1], bnb_optimizer.state[p2][name2], atol=atol, rtol=rtol, ) if gtype == torch.float16: # the adam buffers should also be close because they are 32-bit # but the paramters can diverge because they are 16-bit # the difference grow larger and larger with each update # --> copy the state to keep weights close p1.data = p1.data.half().float() p2.copy_(p1.data) torch.testing.assert_allclose(p1.half(), p2) if optim_name in ["lars", "lamb"]: assert bnb_optimizer.state[p2]["unorm_vec"] > 0.0 ===========changed ref 0=========== # module: tests.test_optim def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0): idx = torch.isclose(a, b, rtol, atol) error_count = (idx == 0).sum().item() if error_count > max_error_count: + print(f"Too many values not close: assert {error_count} < {max_error_count}") - print(f"Too many values not close: assert {sumval} < {count}") torch.testing.assert_allclose(a, b, rtol, atol) ===========changed ref 1=========== # module: bitsandbytes.optim.optimizer class Optimizer1State(Optimizer8bit): @torch.no_grad() def update_step(self, group, p, gindex, pindex): state = self.state[p] grad = p.grad config = self.get_config(gindex, pindex, group) state["step"] += 1 step = state["step"] if config["percentile_clipping"] < 100: current_gnorm, clip_value, gnorm_scale = F.percentile_clipping( grad, state["gnorm_vec"], step, config["percentile_clipping"] ) else: gnorm_scale = 1.0 if state["state1"].dtype == torch.float: F.optimizer_update_32bit( self.optimizer_name, grad, p, state["state1"], config["betas"][0], config["eps"], step, config["lr"], None, + config['betas'][1], - 0.0, config["weight_decay"], gnorm_scale, state["unorm_vec"] if config["max_unorm"] > 0.0 else None, max_unorm=config["max_unorm"], skip_zeros=config["skip_zeros"], ) elif state["state1"].dtype == torch.uint8 and not config["block_wise"]: F.optimizer_update_8bit( self.optimizer_name, grad, p, state["state1"], None, config["betas"][0], config["betas"][1], config["eps"], step, config["lr"], state["qmap1"], None, state["max1"], None, state["new_max1"], None, config["weight_decay"], gnorm_scale, state["</s> ===========changed ref 2=========== # module: bitsandbytes.optim.optimizer class Optimizer1State(Optimizer8bit): @torch.no_grad() def update_step(self, group, p, gindex, pindex): # offset: 1 <s>new_max1"], None, config["weight_decay"], gnorm_scale, state["unorm_vec"] if config["max_unorm"] > 0.0 else None, max_unorm=config["max_unorm"], ) state["max1"], state["new_max1"] = state["new_max1"], state["max1"] elif state["state1"].dtype == torch.uint8 and config["block_wise"]: F.optimizer_update_8bit_blockwise( self.optimizer_name, grad, p, state["state1"], None, config["betas"][0], config["betas"][1], config["eps"], step, config["lr"], state["qmap1"], None, state["absmax1"], None, config["weight_decay"], gnorm_scale=gnorm_scale, skip_zeros=config["skip_zeros"], )
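===========sketch: optimizer state_dict round trip===========
The mid-run checkpoint check in the test reduces to the standard PyTorch save/load pattern. The sketch below uses torch.optim.Adam as a stand-in for the bnb optimizer; the real test additionally compares the quantized state buffers after reload.

import os
import tempfile

import torch

p = torch.nn.Parameter(torch.randn(8))
opt = torch.optim.Adam([p], lr=1e-3)
p.grad = torch.randn(8)
opt.step()

with tempfile.TemporaryDirectory() as path:
    torch.save(opt.state_dict(), os.path.join(path, "opt.pt"))
    opt2 = torch.optim.Adam([p], lr=1e-3)
    opt2.load_state_dict(torch.load(os.path.join(path, "opt.pt")))

# the restored buffers match the originals exactly
assert torch.equal(opt.state[p]["exp_avg"], opt2.state[p]["exp_avg"])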