ariG23498 HF Staff committed on
Commit d7f9a9d · verified · 1 Parent(s): a88f3c2

Upload inclusionAI_LLaDA2.0-mini-preview_0.txt with huggingface_hub

inclusionAI_LLaDA2.0-mini-preview_0.txt ADDED
@@ -0,0 +1,41 @@
+ Traceback (most recent call last):
+ File "/tmp/inclusionAI_LLaDA2.0-mini-preview_0VZwTzf.py", line 16, in <module>
+ pipe = pipeline("text-generation", model="inclusionAI/LLaDA2.0-mini-preview", trust_remote_code=True)
+ File "/tmp/.cache/uv/environments-v2/b091aa2c78468aae/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1229, in pipeline
+ return pipeline_class(model=model, framework=framework, task=task, **kwargs)
+ File "/tmp/.cache/uv/environments-v2/b091aa2c78468aae/lib/python3.13/site-packages/transformers/pipelines/text_generation.py", line 121, in __init__
+ super().__init__(*args, **kwargs)
+ ~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
+ File "/tmp/.cache/uv/environments-v2/b091aa2c78468aae/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1044, in __init__
+ self.model.to(self.device)
+ ~~~~~~~~~~~~~^^^^^^^^^^^^^
+ File "/tmp/.cache/uv/environments-v2/b091aa2c78468aae/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4343, in to
+ return super().to(*args, **kwargs)
+ ~~~~~~~~~~^^^^^^^^^^^^^^^^^
+ File "/tmp/.cache/uv/environments-v2/b091aa2c78468aae/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1371, in to
+ return self._apply(convert)
+ ~~~~~~~~~~~^^^^^^^^^
+ File "/tmp/.cache/uv/environments-v2/b091aa2c78468aae/lib/python3.13/site-packages/torch/nn/modules/module.py", line 930, in _apply
+ module._apply(fn)
+ ~~~~~~~~~~~~~^^^^
+ File "/tmp/.cache/uv/environments-v2/b091aa2c78468aae/lib/python3.13/site-packages/torch/nn/modules/module.py", line 930, in _apply
+ module._apply(fn)
+ ~~~~~~~~~~~~~^^^^
+ File "/tmp/.cache/uv/environments-v2/b091aa2c78468aae/lib/python3.13/site-packages/torch/nn/modules/module.py", line 930, in _apply
+ module._apply(fn)
+ ~~~~~~~~~~~~~^^^^
+ [Previous line repeated 2 more times]
+ File "/tmp/.cache/uv/environments-v2/b091aa2c78468aae/lib/python3.13/site-packages/torch/nn/modules/module.py", line 957, in _apply
+ param_applied = fn(param)
+ File "/tmp/.cache/uv/environments-v2/b091aa2c78468aae/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1357, in convert
+ return t.to(
+ ~~~~^
+ device,
+ ^^^^^^^
+ dtype if t.is_floating_point() or t.is_complex() else None,
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ non_blocking,
+ ^^^^^^^^^^^^^
+ )
+ ^
+ torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 12.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 4.69 MiB is free. Process 25633 has 22.29 GiB memory in use. Of the allocated memory 22.05 GiB is allocated by PyTorch, and 1.86 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
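
For context, the failure happens before generation starts: the pipeline's default path calls self.model.to(self.device), which tries to materialize all weights in full precision on GPU 0. Below is a minimal sketch of a lower-memory load, assuming a single ~22 GiB card. torch_dtype and device_map are standard from_pretrained kwargs forwarded by pipeline(); the specific values here are illustrative mitigations, not a setup taken from the model card.

    # Sketch: lower-memory load for the script that failed above.
    # Assumes one ~22 GiB GPU; values are illustrative, not from the model card.
    import os

    # Optional: the OOM message itself suggests this allocator setting to
    # reduce fragmentation. It must be set before CUDA is initialized.
    os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")

    import torch
    from transformers import pipeline

    pipe = pipeline(
        "text-generation",
        model="inclusionAI/LLaDA2.0-mini-preview",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,  # roughly halves weight memory vs. an fp32 load
        device_map="auto",           # let accelerate place weights instead of a bare .to(cuda:0)
    )
    print(pipe("Hello", max_new_tokens=16)[0]["generated_text"])

With device_map set, the pipeline should skip the model.to(device) call that raised here, and if the bf16 weights still exceed VRAM, accelerate offloads the remainder to CPU RAM at a latency cost.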