Upload OpenGVLab_InternVL3_5-8B_0.txt with huggingface_hub
OpenGVLab_InternVL3_5-8B_0.txt (CHANGED)
@@ -1,7 +1,7 @@
 Traceback (most recent call last):
-  File "/tmp/OpenGVLab_InternVL3_5-
+  File "/tmp/OpenGVLab_InternVL3_5-8B_06uMxsa.py", line 13, in <module>
     pipe = pipeline("image-text-to-text", model="OpenGVLab/InternVL3_5-8B", trust_remote_code=True)
-  File "/tmp/.cache/uv/environments-v2/92a0710a497a595f/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line
+  File "/tmp/.cache/uv/environments-v2/92a0710a497a595f/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1028, in pipeline
     framework, model = infer_framework_load_model(
                        ~~~~~~~~~~~~~~~~~~~~~~~~~~^
         adapter_path if adapter_path is not None else model,
@@ -11,7 +11,7 @@ Traceback (most recent call last):
     ^^^^^^^^^^^^^^^
     )
     ^
-  File "/tmp/.cache/uv/environments-v2/92a0710a497a595f/lib/python3.13/site-packages/transformers/pipelines/base.py", line
+  File "/tmp/.cache/uv/environments-v2/92a0710a497a595f/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
     raise ValueError(
         f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
     )
@@ -19,26 +19,14 @@ ValueError: Could not load model OpenGVLab/InternVL3_5-8B with any of the follow
 
 while loading with AutoModelForImageTextToText, an error is thrown:
 Traceback (most recent call last):
-  File "/tmp/.cache/uv/environments-v2/92a0710a497a595f/lib/python3.13/site-packages/transformers/pipelines/base.py", line
+  File "/tmp/.cache/uv/environments-v2/92a0710a497a595f/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
     model = model_class.from_pretrained(model, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/92a0710a497a595f/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line
+  File "/tmp/.cache/uv/environments-v2/92a0710a497a595f/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
     raise ValueError(
     ...<2 lines>...
     )
-ValueError: Unrecognized configuration class <class 'transformers_modules.OpenGVLab.InternVL3_5-8B.
-Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, GotOcr2Config, IdeficsConfig, Idefics2Config, Idefics3Config, InstructBlipConfig, InternVLConfig, JanusConfig, Kosmos2Config, Llama4Config, LlavaConfig, LlavaNextConfig, LlavaNextVideoConfig, LlavaOnevisionConfig, Mistral3Config, MllamaConfig, PaliGemmaConfig, PerceptionLMConfig, Pix2StructConfig, PixtralVisionConfig, Qwen2_5_VLConfig, Qwen2VLConfig, ShieldGemma2Config, SmolVLMConfig, UdopConfig, VipLlavaConfig, VisionEncoderDecoderConfig.
-
-During handling of the above exception, another exception occurred:
-
-Traceback (most recent call last):
-  File "/tmp/.cache/uv/environments-v2/92a0710a497a595f/lib/python3.13/site-packages/transformers/pipelines/base.py", line 310, in infer_framework_load_model
-    model = model_class.from_pretrained(model, **fp32_kwargs)
-  File "/tmp/.cache/uv/environments-v2/92a0710a497a595f/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 603, in from_pretrained
-    raise ValueError(
-    ...<2 lines>...
-    )
-ValueError: Unrecognized configuration class <class 'transformers_modules.OpenGVLab.InternVL3_5-8B.e7a151fc3d3ed407a6767598384293aa814faaee.configuration_internvl_chat.InternVLChatConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
-Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, GotOcr2Config, IdeficsConfig, Idefics2Config, Idefics3Config, InstructBlipConfig, InternVLConfig, JanusConfig, Kosmos2Config, Llama4Config, LlavaConfig, LlavaNextConfig, LlavaNextVideoConfig, LlavaOnevisionConfig, Mistral3Config, MllamaConfig, PaliGemmaConfig, PerceptionLMConfig, Pix2StructConfig, PixtralVisionConfig, Qwen2_5_VLConfig, Qwen2VLConfig, ShieldGemma2Config, SmolVLMConfig, UdopConfig, VipLlavaConfig, VisionEncoderDecoderConfig.
+ValueError: Unrecognized configuration class <class 'transformers_modules.OpenGVLab.InternVL3_5-8B.9bb6a56ad9cc69db95e2d4eeb15a52bbcac4ef79.configuration_internvl_chat.InternVLChatConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
+Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, Florence2Config, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, Glm4vMoeConfig, GotOcr2Config, IdeficsConfig, Idefics2Config, Idefics3Config, InstructBlipConfig, InternVLConfig, JanusConfig, Kosmos2Config, Kosmos2_5Config, Llama4Config, LlavaConfig, LlavaNextConfig, LlavaNextVideoConfig, LlavaOnevisionConfig, Mistral3Config, MllamaConfig, Ovis2Config, PaliGemmaConfig, PerceptionLMConfig, Pix2StructConfig, PixtralVisionConfig, Qwen2_5_VLConfig, Qwen2VLConfig, ShieldGemma2Config, SmolVLMConfig, UdopConfig, VipLlavaConfig, VisionEncoderDecoderConfig.
 
 
 
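Note on the failure: pipeline("image-text-to-text", ...) maps the task to AutoModelForImageTextToText, and that Auto class only accepts the in-library configuration classes listed above. The repository's remote-code InternVLChatConfig is not registered with it, so every candidate class is rejected even with trust_remote_code=True. A minimal workaround sketch, assuming this checkpoint exposes the custom chat() API that OpenGVLab's InternVL model cards document (the generation settings here are illustrative):

import torch
from transformers import AutoModel, AutoTokenizer

path = "OpenGVLab/InternVL3_5-8B"
# Plain AutoModel resolves to the repo's remote-code model class directly,
# sidestepping the AutoModelForImageTextToText mapping that raises above.
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
).eval()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)

# Text-only turn; InternVL's remote chat() accepts pixel_values=None.
response = model.chat(tokenizer, None, "Hello, who are you?",
                      dict(max_new_tokens=64))
print(response)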
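Alternatively, transformers has native InternVL support (InternVLConfig appears in the accepted list), which the pipeline can use without remote code, provided an HF-format checkpoint exists. InternVL3 publishes such conversions with an -hf suffix (e.g. OpenGVLab/InternVL3-8B-hf); an InternVL3_5 equivalent is assumed here, so the repo id below is hypothetical:

from transformers import pipeline

# Hypothetical HF-format repo id; substitute the real one if published.
pipe = pipeline("image-text-to-text", model="OpenGVLab/InternVL3_5-8B-HF")
messages = [{
    "role": "user",
    "content": [
        {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
        {"type": "text", "text": "Describe this image."},
    ],
}]
print(pipe(text=messages, max_new_tokens=60))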