Problem: Setting `pad_token_id` to `eos_token_id`:151643 for open-end generation.

#16, opened by reda2000ach

When executing this part of the code:

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=24000)
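
For context, the generate call above comes from the standard dots.ocr inference example; the surrounding setup looks roughly like the sketch below (the model path, prompt text, and image path are placeholders, and the exact loading options are assumptions rather than a verbatim copy of my script):

import torch
from transformers import AutoModelForCausalLM, AutoProcessor
from qwen_vl_utils import process_vision_info

model_path = "./weights/DotsOCR"  # placeholder path
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    attn_implementation="flash_attention_2",  # this is what routes into flash_attn below
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)

messages = [{"role": "user", "content": [
    {"type": "image", "image": "page.png"},        # placeholder image
    {"type": "text", "text": "Extract the text"},  # placeholder prompt
]}]
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(text=[text], images=image_inputs, videos=video_inputs,
                   padding=True, return_tensors="pt").to(model.device)
# the failing call is the model.generate(...) line quoted above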

I get the following error:

RuntimeError Traceback (most recent call last)
Cell In[17], line 2
1 # Inference: Generation of the output
----> 2 generated_ids = model.generate(**inputs, max_new_tokens=24000)
3 generated_ids_trimmed = [
4 out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
5 ]
6 output_text = processor.batch_decode(
7 generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
8 )

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\utils\_contextlib.py:116, in context_decorator.<locals>.decorate_context(*args, **kwargs)
113 @functools.wraps(func)
114 def decorate_context(*args, **kwargs):
115 with ctx_factory():
--> 116 return func(*args, **kwargs)

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\transformers\generation\utils.py:2465, in GenerationMixin.generate(self, inputs, generation_config, logits_processor, stopping_criteria, prefix_allowed_tokens_fn, synced_gpus, assistant_model, streamer, negative_prompt_ids, negative_prompt_attention_mask, use_model_defaults, **kwargs)
2457 input_ids, model_kwargs = self._expand_inputs_for_generation(
2458 input_ids=input_ids,
2459 expand_size=generation_config.num_return_sequences,
2460 is_encoder_decoder=self.config.is_encoder_decoder,
2461 **model_kwargs,
2462 )
2464 # 12. run sample (it degenerates to greedy search when generation_config.do_sample=False)
-> 2465 result = self._sample(
2466 input_ids,
2467 logits_processor=prepared_logits_processor,
2468 stopping_criteria=prepared_stopping_criteria,
2469 generation_config=generation_config,
2470 synced_gpus=synced_gpus,
2471 streamer=streamer,
2472 **model_kwargs,
2473 )
2475 elif generation_mode in (GenerationMode.BEAM_SAMPLE, GenerationMode.BEAM_SEARCH):
2476 # 11. interleave input_ids with num_beams additional sequences per batch
2477 input_ids, model_kwargs = self._expand_inputs_for_generation(
2478 input_ids=input_ids,
2479 expand_size=generation_config.num_beams,
2480 is_encoder_decoder=self.config.is_encoder_decoder,
2481 **model_kwargs,
2482 )

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\transformers\generation\utils.py:3431, in GenerationMixin._sample(self, input_ids, logits_processor, stopping_criteria, generation_config, synced_gpus, streamer, **model_kwargs)
3428 model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {})
3430 if is_prefill:
-> 3431 outputs = self(**model_inputs, return_dict=True)
3432 is_prefill = False
3433 else:

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\nn\modules\module.py:1751, in Module._wrapped_call_impl(self, *args, **kwargs)
1749 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1750 else:
-> 1751 return self._call_impl(*args, **kwargs)

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\nn\modules\module.py:1762, in Module._call_impl(self, *args, **kwargs)
1757 # If we don't have any hooks, we want to skip the rest of the logic in
1758 # this function, and just call forward.
1759 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1760 or _global_backward_pre_hooks or _global_backward_hooks
1761 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1762 return forward_call(*args, **kwargs)
1764 result = None
1765 called_always_called_hooks = set()

File ~\.cache\huggingface\modules\transformers_modules\DotsOCR\modeling_dots_ocr.py:89, in DotsOCRForCausalLM.forward(self, input_ids, pixel_values, image_grid_thw, inputs_embeds, attention_mask, position_ids, past_key_values, labels, output_attentions, output_hidden_states, return_dict, use_cache, logits_to_keep, **loss_kwargs)
87 if inputs_embeds is None:
88 img_mask = input_ids == self.config.image_token_id
---> 89 inputs_embeds = self.prepare_inputs_embeds(input_ids, pixel_values, image_grid_thw, img_mask)
91 outputs = super().forward(
92 inputs_embeds=inputs_embeds,
93 attention_mask=attention_mask,
(...) 102 **loss_kwargs,
103 )
105 return outputs

File ~\.cache\huggingface\modules\transformers_modules\DotsOCR\modeling_dots_ocr.py:44, in DotsOCRForCausalLM.prepare_inputs_embeds(self, input_ids, pixel_values, grid_thw, img_mask)
39 if grid_thw.shape[0] > DOTS_VLM_MAX_IMAGES:
40 print(
41 f"Num image exceeded: {grid_thw.shape[0]} > {DOTS_VLM_MAX_IMAGES}, which may cause FSDP hang"
42 )
---> 44 vision_embeddings = self.vision_tower(pixel_values, grid_thw)
46 true_indices = torch.nonzero(img_mask).squeeze()
47 if len(true_indices) > vision_embeddings.size(0):

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\nn\modules\module.py:1751, in Module._wrapped_call_impl(self, *args, **kwargs)
1749 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1750 else:
-> 1751 return self._call_impl(*args, **kwargs)

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\nn\modules\module.py:1762, in Module._call_impl(self, *args, **kwargs)
1757 # If we don't have any hooks, we want to skip the rest of the logic in
1758 # this function, and just call forward.
1759 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1760 or _global_backward_pre_hooks or _global_backward_hooks
1761 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1762 return forward_call(*args, **kwargs)
1764 result = None
1765 called_always_called_hooks = set()

File ~\.cache\huggingface\modules\transformers_modules\DotsOCR\modeling_dots_vision.py:398, in DotsVisionTransformer.forward(self, hidden_states, grid_thw, bf16)
391 hidden_states = self._gradient_checkpointing_func(
392 blk.__call__,
393 hidden_states,
394 cu_seqlens,
395 rotary_pos_emb,
396 )
397 else:
--> 398 hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens, rotary_pos_emb=rotary_pos_emb)
400 if self.config.post_norm:
401 hidden_states = self.post_trunk_norm(hidden_states)

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\nn\modules\module.py:1751, in Module._wrapped_call_impl(self, *args, **kwargs)
1749 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1750 else:
-> 1751 return self._call_impl(*args, **kwargs)

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\nn\modules\module.py:1762, in Module._call_impl(self, *args, **kwargs)
1757 # If we don't have any hooks, we want to skip the rest of the logic in
1758 # this function, and just call forward.
1759 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1760 or _global_backward_pre_hooks or _global_backward_hooks
1761 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1762 return forward_call(*args, **kwargs)
1764 result = None
1765 called_always_called_hooks = set()

File ~\.cache\huggingface\modules\transformers_modules\DotsOCR\modeling_dots_vision.py:283, in DotsVisionBlock.forward(self, hidden_states, cu_seqlens, rotary_pos_emb)
282 def forward(self, hidden_states, cu_seqlens, rotary_pos_emb) -> torch.Tensor:
--> 283 hidden_states = hidden_states + self.attn(
284 self.norm1(hidden_states), cu_seqlens=cu_seqlens, rotary_pos_emb=rotary_pos_emb
285 )
286 hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
287 return hidden_states

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\nn\modules\module.py:1751, in Module._wrapped_call_impl(self, *args, **kwargs)
1749 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1750 else:
-> 1751 return self._call_impl(*args, **kwargs)

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\nn\modules\module.py:1762, in Module._call_impl(self, *args, **kwargs)
1757 # If we don't have any hooks, we want to skip the rest of the logic in
1758 # this function, and just call forward.
1759 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1760 or _global_backward_pre_hooks or _global_backward_hooks
1761 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1762 return forward_call(*args, **kwargs)
1764 result = None
1765 called_always_called_hooks = set()

File ~\.cache\huggingface\modules\transformers_modules\DotsOCR\modeling_dots_vision.py:149, in VisionFlashAttention2.forward(self, hidden_states, cu_seqlens, rotary_pos_emb)
147 k = apply_rotary_pos_emb_vision(k.unsqueeze(0), rotary_pos_emb).squeeze(0)
148 max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
--> 149 attn_output = flash_attn_varlen_func(
150 q, k, v, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen, causal=self.is_causal
151 ).reshape(seq_length, -1)
152 attn_output = self.proj(attn_output)
154 return attn_output

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\flash_attn\flash_attn_interface.py:1448, in flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, causal, window_size, softcap, alibi_slopes, deterministic, return_attn_probs, block_table)
1375 def flash_attn_varlen_func(
1376 q,
1377 k,
(...) 1391 block_table=None,
1392 ):
1393 """dropout_p should be set to 0.0 during evaluation
1394 Supports multi-query and grouped-query attention (MQA/GQA) by passing in K, V with fewer heads
1395 than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
(...) 1446 pattern (negative means that location was dropped, nonnegative means it was kept).
1447 """
-> 1448 return FlashAttnVarlenFunc.apply(
1449 q,
1450 k,
1451 v,
1452 cu_seqlens_q,
1453 cu_seqlens_k,
1454 max_seqlen_q,
1455 max_seqlen_k,
1456 dropout_p,
1457 softmax_scale,
1458 causal,
1459 window_size,
1460 softcap,
1461 alibi_slopes,
1462 deterministic,
1463 return_attn_probs,
1464 block_table,
1465 torch.is_grad_enabled(),
1466 )

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\autograd\function.py:575, in Function.apply(cls, *args, **kwargs)
572 if not torch._C._are_functorch_transforms_active():
573 # See NOTE: [functorch vjp and autograd interaction]
574 args = _functorch.utils.unwrap_dead_wrappers(args)
--> 575 return super().apply(*args, **kwargs) # type: ignore[misc]
577 if not is_setup_ctx_defined:
578 raise RuntimeError(
579 "In order to use an autograd.Function with functorch transforms "
580 "(vmap, grad, jvp, jacrev, ...), it must override the setup_context "
581 "staticmethod. For more details, please see "
582 "https://pytorch.org/docs/main/notes/extending.func.html"
583 )

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\flash_attn\flash_attn_interface.py:930, in FlashAttnVarlenFunc.forward(ctx, q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, causal, window_size, softcap, alibi_slopes, deterministic, return_softmax, block_table, is_grad_enabled)
928 k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
929 v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
--> 930 out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
931 q,
932 k,
933 v,
934 cu_seqlens_q,
935 cu_seqlens_k,
936 max_seqlen_q,
937 max_seqlen_k,
938 dropout_p,
939 softmax_scale,
940 causal=causal,
941 window_size_left=window_size[0],
942 window_size_right=window_size[1],
943 softcap=softcap,
944 alibi_slopes=alibi_slopes,
945 return_softmax=return_softmax and dropout_p > 0,
946 block_table=block_table,
947 )
948 if is_grad:
949 ctx.save_for_backward(
950 q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
951 )

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\_ops.py:1158, in OpOverloadPacket.__call__(self, *args, **kwargs)
1156 if self._has_torchbind_op_overload and _must_dispatch_in_python(args, kwargs):
1157 return _call_overload_packet_from_python(self, args, kwargs)
-> 1158 return self._op(*args, **(kwargs or {}))

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\_library\autograd.py:113, in make_autograd_impl.<locals>.autograd_impl(keyset, *args, **keyword_only_args)
111 result = Generated.apply(*args, Metadata(keyset, keyword_only_args)) # type: ignore[attr-defined]
112 else:
--> 113 result = forward_no_grad(*args, Metadata(keyset, keyword_only_args))
114 return result

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\_library\autograd.py:40, in make_autograd_impl.<locals>.forward_no_grad(*args)
38 keyset = metadata.keyset
39 kwargs = metadata.keyword_only_args
---> 40 result = op.redispatch(keyset & _C._after_autograd_keyset, *args, **kwargs)
41 return result

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\_ops.py:761, in OpOverload.redispatch(self, keyset, *args, **kwargs)
760 def redispatch(self, /, keyset, *args, **kwargs):
--> 761 return self._handle.redispatch_boxed(keyset, *args, **kwargs)

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\_library\custom_ops.py:335, in CustomOpDef.register_kernel.<locals>.inner.<locals>.backend_impl(*args, **kwargs)
334 def backend_impl(*args, **kwargs):
--> 335 result = self._backend_fns[device_type](*args, **kwargs)
337 def get_module():
338 fn = self._backend_fns[device_type]

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\_compile.py:51, in _disable_dynamo.<locals>.inner(*args, **kwargs)
48 disable_fn = torch._dynamo.disable(fn, recursive)
49 fn.__dynamo_disable = disable_fn # type: ignore[attr-defined]
---> 51 return disable_fn(*args, **kwargs)

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\_dynamo\eval_frame.py:838, in DisableContext.__call__.<locals>._fn(*args, **kwargs)
836 _maybe_set_eval_frame(_callback_from_stance(self.callback))
837 try:
--> 838 return fn(*args, **kwargs)
839 finally:
840 set_eval_frame(None)

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\torch\_library\custom_ops.py:367, in CustomOpDef.register_kernel.<locals>.inner.<locals>.wrapped_fn(*args, **kwargs)
365 return self._init_fn(*args, **kwargs)
366 else:
--> 367 return fn(*args, **kwargs)

File c:\Users\reda\.conda\envs\dots_ocr\Lib\site-packages\flash_attn\flash_attn_interface.py:170, in _flash_attn_varlen_forward(q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, causal, window_size_left, window_size_right, softcap, alibi_slopes, return_softmax, block_table, leftpad_k, seqused_k, zero_tensors)
147 @_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_forward", mutates_args=(), device_types="cuda")
148 def _flash_attn_varlen_forward(
149 q: torch.Tensor,
(...) 167 zero_tensors: bool = False,
168 ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
169 q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
--> 170 out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.varlen_fwd(
171 q,
172 k,
173 v,
174 None,
175 cu_seqlens_q,
176 cu_seqlens_k,
177 seqused_k,
178 leftpad_k,
179 block_table,
180 alibi_slopes,
181 max_seqlen_q,
182 max_seqlen_k,
183 dropout_p,
184 softmax_scale,
185 zero_tensors,
186 causal,
187 window_size_left,
188 window_size_right,
189 softcap,
190 return_softmax,
191 None,
192 )
193 # if out.isnan().any() or softmax_lse.isnan().any():
194 # breakpoint()
195 return out, softmax_lse, S_dmask, rng_state

RuntimeError: CUDA error: no kernel image is available for execution on the device
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1
Compile with TORCH_USE_CUDA_DSA to enable device-side assertions.

Exception raised from c10_cuda_check_implementation at C:\actions-runner\_work\pytorch\pytorch\pytorch\c10\cuda\CUDAException.cpp:43 (most recent call first):
00007FFDAF982C2400007FFDAF982B80 c10.dll!c10::Error::Error [ @ ]
00007FFDAF98170A00007FFDAF9816B0 c10.dll!c10::detail::torchCheckFail [ @ ]
00007FFDE6B76BBF00007FFDE6B768B0 c10_cuda.dll!c10::cuda::c10_cuda_check_implementation [ @ ]
00007FFBE8C5626300007FFBE8B4CF80 flash_attn_2_cuda.cp312-win_amd64.pyd!PyInit_flash_attn_2_cuda [ @ ]
00007FFBE8C57F3C00007FFBE8B4CF80 flash_attn_2_cuda.cp312-win_amd64.pyd!PyInit_flash_attn_2_cuda [ @ ]
00007FFBE8C54B4700007FFBE8B4CF80 flash_attn_2_cuda.cp312-win_amd64.pyd!PyInit_flash_attn_2_cuda [ @ ]
00007FFBE8C51FD100007FFBE8B4CF80 flash_attn_2_cuda.cp312-win_amd64.pyd!PyInit_flash_attn_2_cuda [ @ ]
00007FFBE8B46DC000007FFBE8B35FE0 flash_attn_2_cuda.cp312-win_amd64.pyd!c10::ivalue::Object::operator= [ @ ]
00007FFBE8B5642900007FFBE8B4CF80 flash_attn_2_cuda.cp312-win_amd64.pyd!PyInit_flash_attn_2_cuda [ @ ]
00007FFBE8B53D0400007FFBE8B4CF80 flash_attn_2_cuda.cp312-win_amd64.pyd!PyInit_flash_attn_2_cuda [ @ ]
00007FFBE8B53DD400007FFBE8B4CF80 flash_attn_2_cuda.cp312-win_amd64.pyd!PyInit_flash_attn_2_cuda [ @ ]
00007FFBE8B3F3FC00007FFBE8B35FE0 flash_attn_2_cuda.cp312-win_amd64.pyd!c10::ivalue::Object::operator= [ @ ]
00007FFDADC2099300007FFDADC1FBF0 python312.dll!PyCFunction_GetFlags [ @ ]
00007FFDADBD41CD00007FFDADBD40A0 python312.dll!PyObject_MakeTpCall [ @ ]
00007FFDADBD44F500007FFDADBD44C0 python312.dll!PyObject_Vectorcall [ @ ]
00007FFDADD0FBE300007FFDADD096F0 python312.dll!PyEval_EvalFrameDefault [ @ ]
00007FFDADBD47C400007FFDADBD4770 python312.dll!PyFunction_Vectorcall [ @ ]
00007FFDADBD444300007FFDADBD42B0 python312.dll!PyVectorcall_Function [ @ ]
00007FFDADBD458600007FFDADBD4540 python312.dll!PyObject_Call [ @ ]
00007FFC789C9A1A00007FFC789A2EB0 torch_python.dll!torch::FunctionParameter::type_name [ @ ]
00007FFC789C5A2F00007FFC789A2EB0 torch_python.dll!torch::FunctionParameter::type_name [ @ ]
00007FFC789BD2CB00007FFC789A2EB0 torch_python.dll!torch::FunctionParameter::type_name [ @ ]
00007FFC789A357600007FFC789A2EB0 torch_python.dll!torch::FunctionParameter::type_name [ @ ]
00007FFC780D182C00007FFC780D0990 torch_python.dll!c10::ivalue::Future::devices [ @ ]
00007FFDADC2099300007FFDADC1FBF0 python312.dll!PyCFunction_GetFlags [ @ ]
00007FFDADBD41CD00007FFDADBD40A0 python312.dll!PyObject_MakeTpCall [ @ ]
00007FFDADBD74BF00007FFDADBD7340 python312.dll!PyMethod_Self [ @ ]
00007FFDADBD444300007FFDADBD42B0 python312.dll!PyVectorcall_Function [ @ ]
00007FFDADBD458600007FFDADBD4540 python312.dll!PyObject_Call [ @ ]
00007FFDADD0FEDC00007FFDADD096F0 python312.dll!PyEval_EvalFrameDefault [ @ ]
00007FFDADBD47C400007FFDADBD4770 python312.dll!PyFunction_Vectorcall [ @ ]
00007FFDADBD6E2100007FFDADBD6A20 python312.dll!PyCell_Set [ @ ]
00007FFDADBD74BF00007FFDADBD7340 python312.dll!PyMethod_Self [ @ ]
00007FFDADBD444300007FFDADBD42B0 python312.dll!PyVectorcall_Function [ @ ]
00007FFDADBD458600007FFDADBD4540 python312.dll!PyObject_Call [ @ ]
00007FFDADD0FEDC00007FFDADD096F0 python312.dll!PyEval_EvalFrameDefault [ @ ]
00007FFDADBD47C400007FFDADBD4770 python312.dll!PyFunction_Vectorcall [ @ ]
00007FFDADBD444300007FFDADBD42B0 python312.dll!PyVectorcall_Function [ @ ]
00007FFDADBD458600007FFDADBD4540 python312.dll!PyObject_Call [ @ ]
00007FFC789C998F00007FFC789A2EB0 torch_python.dll!torch::FunctionParameter::type_name [ @ ]
00007FFC7ECF0B6100007FFC7ECF09A0 torch_cpu.dll!c10::Dispatcher::callBoxed [ @ ]
00007FFC787907E500007FFC78790720 torch_python.dll!torch::jit::invokeOperatorFromPython [ @ ]
00007FFC7878D6FD00007FFC7878D5C0 torch_python.dll!torch::jit::_get_operation_for_overload_or_packet [ @ ]
00007FFC786F478600007FFC786DFDE0 torch_python.dll!c10d::PythonOnCompletionHook::PythonOnCompletionHook [ @ ]
00007FFC78697FA600007FFC7861EA90 torch_python.dll!THPPointer::dup [ @ ]
00007FFC780D182C00007FFC780D0990 torch_python.dll!c10::ivalue::Future::devices [ @ ]
00007FFDADC2099300007FFDADC1FBF0 python312.dll!PyCFunction_GetFlags [ @ ]
00007FFDADBD461600007FFDADBD4540 python312.dll!PyObject_Call [ @ ]
00007FFDADD0FEDC00007FFDADD096F0 python312.dll!PyEval_EvalFrameDefault [ @ ]
00007FFDADBD47C400007FFDADBD4770 python312.dll!PyFunction_Vectorcall [ @ ]
00007FFDADBD3E6100007FFDADBD3DB0 python312.dll!PyObject_FastCallDictTstate [ @ ]
00007FFDADBD4A8200007FFDADBD49E0 python312.dll!PyObject_Call_Prepend [ @ ]
00007FFDADC4CCA100007FFDADC487D0 python312.dll!PyType_Ready [ @ ]
00007FFDADBD41CD00007FFDADBD40A0 python312.dll!PyObject_MakeTpCall [ @ ]
00007FFDADBD44F500007FFDADBD44C0 python312.dll!PyObject_Vectorcall [ @ ]
00007FFDADD0FBE300007FFDADD096F0 python312.dll!PyEval_EvalFrameDefault [ @ ]
00007FFDADBD47C400007FFDADBD4770 python312.dll!PyFunction_Vectorcall [ @ ]
00007FFDADBD444300007FFDADBD42B0 python312.dll!PyVectorcall_Function [ @ ]
00007FFDADBD458600007FFDADBD4540 python312.dll!PyObject_Call [ @ ]
00007FFC7857BF9500007FFC78576BB0 torch_python.dll!torch::autograd::registerFunctionPreHook [ @ ]
00007FFDADC209CE00007FFDADC1FBF0 python312.dll!PyCFunction_GetFlags [ @ ]
00007FFDADBD461600007FFDADBD4540 python312.dll!PyObject_Call [ @ ]
00007FFDADD0FEDC00007FFDADD096F0 python312.dll!PyEval_EvalFrameDefault [ @ ]
00007FFDADBD47C400007FFDADBD4770 python312.dll!PyFunction_Vectorcall [ @ ]

Please help me, it doesn't work and I still get the same error.
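
For reference, a quick environment check like the following (a generic sketch, not specific to this model) should show whether the installed flash-attn build actually matches the GPU, since "no kernel image is available for execution on the device" usually points to a compute-capability mismatch:

import torch
import flash_attn

print("torch:", torch.__version__, "built for CUDA", torch.version.cuda)
print("GPU:", torch.cuda.get_device_name(0),
      "compute capability:", torch.cuda.get_device_capability(0))
print("flash_attn:", flash_attn.__version__)
# FlashAttention-2 kernels are generally built for Ampere-class GPUs (compute capability 8.0)
# and newer; an older GPU, or a wheel compiled for a different CUDA architecture than the
# card in this machine, typically raises exactly this "no kernel image" error.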
