transformers v4.54 compatibility
Browse files- processing_maira2.py +1 -4
processing_maira2.py
CHANGED
|
@@ -11,7 +11,7 @@ from transformers import BaseImageProcessor, LlavaProcessor, PreTrainedTokenizer
|
|
| 11 |
from transformers.feature_extraction_utils import BatchFeature
|
| 12 |
from transformers.image_utils import ImageInput, get_image_size, to_numpy_array
|
| 13 |
from transformers.models.llava.processing_llava import LlavaProcessorKwargs
|
| 14 |
-
from transformers.processing_utils import Unpack
|
| 15 |
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
|
| 16 |
|
| 17 |
|
|
@@ -696,9 +696,6 @@ class Maira2Processor(LlavaProcessor):
|
|
| 696 |
if images is None and text is None:
|
| 697 |
raise ValueError("You have to specify at least one of `images` or `text`.")
|
| 698 |
|
| 699 |
-
# check if images and text inputs are reversed for BC
|
| 700 |
-
images, text = _validate_images_text_input_order(images, text)
|
| 701 |
-
|
| 702 |
output_kwargs = self._merge_kwargs(
|
| 703 |
LlavaProcessorKwargs,
|
| 704 |
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
|
|
|
|
| 11 |
from transformers.feature_extraction_utils import BatchFeature
|
| 12 |
from transformers.image_utils import ImageInput, get_image_size, to_numpy_array
|
| 13 |
from transformers.models.llava.processing_llava import LlavaProcessorKwargs
|
| 14 |
+
from transformers.processing_utils import Unpack
|
| 15 |
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
|
| 16 |
|
| 17 |
|
|
|
|
| 696 |
if images is None and text is None:
|
| 697 |
raise ValueError("You have to specify at least one of `images` or `text`.")
|
| 698 |
|
|
|
|
|
|
|
|
|
|
| 699 |
output_kwargs = self._merge_kwargs(
|
| 700 |
LlavaProcessorKwargs,
|
| 701 |
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
|