Dataset schema (column: type, range):
- text: string, lengths 31 to 243k
- type: string, 1 distinct value
- start: int64, 36 to 275k
- end: int64, 286 to 280k
- depth: int64, 0 to 1
- filepath: string, lengths 85 to 188
- parent_class: string, 3 distinct values
- class_index: int64, 0 to 10.8k
class TFSeq2SeqLMOutput(ModelOutput): """ Base class for sequence-to-sequence language models outputs. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided): Language modeling loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None decoder_hidden_states: Tuple[tf.Tensor] | None = None decoder_attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None encoder_last_hidden_state: tf.Tensor | None = None encoder_hidden_states: Tuple[tf.Tensor] | None = None encoder_attentions: Tuple[tf.Tensor] | None = None
class_definition
27,417
31,307
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
300
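The row above stores the TFSeq2SeqLMOutput container. As an illustrative sketch (not part of the dataset row), the fields documented in that docstring can be inspected from any TF sequence-to-sequence model; the `t5-small` checkpoint and the prompt are assumptions made for the example.

import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("t5-small")          # assumed example checkpoint
model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-small")

inputs = tokenizer("translate English to German: How old are you?", return_tensors="tf")
labels = tokenizer("Wie alt bist du?", return_tensors="tf").input_ids

outputs = model(inputs.input_ids, labels=labels)               # returns a TFSeq2SeqLMOutput
print(outputs.loss)                                            # per-token LM loss, shape (n,)
print(outputs.logits.shape)                                    # (batch_size, target_length, vocab_size)
print(outputs.encoder_last_hidden_state.shape)                 # (batch_size, source_length, hidden_size)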
class TFNextSentencePredictorOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `next_sentence_label` is provided): Next sentence prediction loss. logits (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
31,321
32,892
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
301
class TFSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (`tf.Tensor` of shape `(batch_size, )`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
32,906
34,390
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
302
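A minimal usage sketch for the TFSequenceClassifierOutput fields above; the SST-2 DistilBERT checkpoint and the input sentence are assumptions chosen for illustration.

import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

name = "distilbert-base-uncased-finetuned-sst-2-english"       # assumed example checkpoint
tokenizer = AutoTokenizer.from_pretrained(name)
model = TFAutoModelForSequenceClassification.from_pretrained(name)

inputs = tokenizer("This movie was great!", return_tensors="tf")
outputs = model(inputs)                                        # TFSequenceClassifierOutput
probs = tf.nn.softmax(outputs.logits, axis=-1)                 # (batch_size, num_labels)
label = model.config.id2label[int(tf.argmax(outputs.logits, axis=-1)[0])]
print(label, probs.numpy())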
class TFSeq2SeqSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence sentence classification models. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `label` is provided): Classification (or regression if config.num_labels==1) loss. logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)` encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None decoder_hidden_states: Tuple[tf.Tensor] | None = None decoder_attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None encoder_last_hidden_state: tf.Tensor | None = None encoder_hidden_states: Tuple[tf.Tensor] | None = None encoder_attentions: Tuple[tf.Tensor] | None = None
class_definition
34,404
38,103
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
303
class TFSemanticSegmenterOutput(ModelOutput): """ Base class for outputs of semantic segmentation models. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): Classification scores for each pixel. <Tip warning={true}> The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed. </Tip> hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
38,117
40,004
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
304
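The Tip in the TFSemanticSegmenterOutput docstring warns that the logits are usually smaller than the input image. A hedged sketch of the suggested post-processing; `outputs` stands for any TFSemanticSegmenterOutput already obtained from a model, and the original image size is a placeholder.

import tensorflow as tf

original_height, original_width = 512, 512                     # placeholder: size of the original input image
# outputs.logits has shape (batch_size, num_labels, logits_height, logits_width), i.e. channels first
logits = tf.transpose(outputs.logits, perm=[0, 2, 3, 1])       # to channels-last for tf.image.resize
upsampled = tf.image.resize(logits, size=(original_height, original_width), method="bilinear")
segmentation_map = tf.argmax(upsampled, axis=-1)               # (batch_size, height, width) per-pixel class ids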
class TFSemanticSegmenterOutputWithNoAttention(ModelOutput): """ Base class for outputs of semantic segmentation models that do not output attention scores. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): Classification scores for each pixel. <Tip warning={true}> The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed. </Tip> hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None
class_definition
40,018
41,508
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
305
class TFImageClassifierOutput(ModelOutput): """ Base class for outputs of image classification models. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
41,522
43,002
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
306
class TFMultipleChoiceModelOutput(ModelOutput): """ Base class for outputs of multiple choice models. Args: loss (`tf.Tensor` of shape *(batch_size, )*, *optional*, returned when `labels` is provided): Classification loss. logits (`tf.Tensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
43,016
44,505
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
307
class TFTokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of unmasked labels, returned when `labels` is provided) : Classification loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
44,519
45,967
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
308
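A short sketch turning the TFTokenClassifierOutput logits above into per-token labels; the CoNLL-03 NER checkpoint is an assumption used for illustration.

import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForTokenClassification

name = "dbmdz/bert-large-cased-finetuned-conll03-english"      # assumed example checkpoint
tokenizer = AutoTokenizer.from_pretrained(name)
model = TFAutoModelForTokenClassification.from_pretrained(name)

inputs = tokenizer("Hugging Face is based in New York City", return_tensors="tf")
outputs = model(inputs)                                        # TFTokenClassifierOutput
predicted_ids = tf.argmax(outputs.logits, axis=-1)[0]          # one label id per token
print([model.config.id2label[int(i)] for i in predicted_ids])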
class TFQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering models. Args: loss (`tf.Tensor` of shape `(batch_size, )`, *optional*, returned when `start_positions` and `end_positions` are provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None start_logits: tf.Tensor = None end_logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
45,981
47,643
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
309
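A hedged sketch of extracting an answer span from the TFQuestionAnsweringModelOutput fields; the SQuAD-distilled DistilBERT checkpoint and the toy question/context are assumptions.

import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering

name = "distilbert-base-cased-distilled-squad"                 # assumed example checkpoint
tokenizer = AutoTokenizer.from_pretrained(name)
model = TFAutoModelForQuestionAnswering.from_pretrained(name)

question, context = "Where is the Eiffel Tower?", "The Eiffel Tower is located in Paris."
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(inputs)                                        # TFQuestionAnsweringModelOutput
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])       # most likely span start
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])           # most likely span end
print(tokenizer.decode(inputs["input_ids"][0][start : end + 1]))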
class TFSeq2SeqQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence question answering models. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None start_logits: tf.Tensor = None end_logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None decoder_hidden_states: Tuple[tf.Tensor] | None = None decoder_attentions: Tuple[tf.Tensor] | None = None encoder_last_hidden_state: tf.Tensor | None = None encoder_hidden_states: Tuple[tf.Tensor] | None = None encoder_attentions: Tuple[tf.Tensor] | None = None
class_definition
47,657
51,168
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
310
class TFSequenceClassifierOutputWithPast(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (`tf.Tensor` of shape `(batch_size, )`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None
class_definition
51,182
53,206
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
311
class TFImageClassifierOutputWithNoAttention(ModelOutput): """ Base class for outputs of image classification models. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor, ...]] = None
class_definition
53,220
54,274
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
312
class TFMaskedImageModelingOutput(ModelOutput): """ Base class for outputs of masked image completion / in-painting models. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): Reconstruction loss. reconstruction (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Reconstructed / completed images. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None reconstruction: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @property def logits(self): warnings.warn( "logits attribute is deprecated and will be removed in version 5 of Transformers." " Please use the reconstruction attribute to retrieve the final output instead.", FutureWarning, ) return self.reconstruction
class_definition
54,288
56,073
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_tf_outputs.py
null
313
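The deprecated `logits` property in TFMaskedImageModelingOutput simply forwards to `reconstruction`. A tiny sketch with a dummy output object; a real masked-image-modeling model would return this class directly.

import tensorflow as tf
from transformers.modeling_tf_outputs import TFMaskedImageModelingOutput

out = TFMaskedImageModelingOutput(reconstruction=tf.zeros((1, 3, 224, 224)))  # dummy tensor for illustration
pixels = out.reconstruction          # preferred accessor
legacy = out.logits                  # still works, but emits a FutureWarning and returns `reconstruction`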
class TensorFlowBenchmark(Benchmark): args: TensorFlowBenchmarkArguments configs: PretrainedConfig framework: str = "TensorFlow" @property def framework_version(self): return tf.__version__ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: # initialize GPU on separate process strategy = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow.") _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_speed(_inference) def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: strategy = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow.") _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_speed(_train) def _inference_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True) strategy = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow.") _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_memory(_inference) def _train_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True) strategy = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow.") _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_memory(_train) def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] if self.args.fp16: raise NotImplementedError("Mixed precision is currently not supported.") has_model_class_in_config = ( hasattr(config, "architectures") and isinstance(config.architectures, list) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model transformers_module = __import__("transformers", fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" " set `--only_pretrain_model` or `args.only_pretrain_model=True`." 
) else: model = TF_MODEL_MAPPING[config.__class__](config) # encoder-decoder has vocab size saved differently vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size input_ids = random_input_ids(batch_size, sequence_length, vocab_size) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_decoder_forward(): return model(input_ids, decoder_input_ids=input_ids, training=False) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_forward(): return model(input_ids, training=False) _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.") if self.args.fp16: raise NotImplementedError("Mixed precision is currently not supported.") has_model_class_in_config = ( hasattr(config, "architectures") and isinstance(config.architectures, list) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model transformers_module = __import__("transformers", fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" " set `--only_pretrain_model` or `args.only_pretrain_model=True`." ) else: model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config) # encoder-decoder has vocab size saved differently vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size input_ids = random_input_ids(batch_size, sequence_length, vocab_size) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_decoder_train(): loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0] gradients = tf.gradients(loss, model.trainable_variables) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_train(): loss = model(input_ids, labels=input_ids, training=True)[0] gradients = tf.gradients(loss, model.trainable_variables) return gradients _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def _measure_speed(self, func) -> float: with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("Do inference on TPU. Running model 5 times to stabilize compilation") timeit.repeat(func, repeat=1, number=5) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average runtimes = timeit.repeat( func, repeat=self.args.repeat, number=10, ) return min(runtimes) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}") def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]: logger.info( "Note that TensorFlow allocates more memory than " "it might need to speed up computation. " "The memory reported here corresponds to the memory " "reported by `nvidia-smi`, which can vary depending " "on total available memory on the GPU that is used." 
) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory" " consumption line by line." ) trace = start_memory_tracing("transformers") if self.args.is_tpu: # tpu raise NotImplementedError( "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking" " with `args.memory=False`" ) elif self.args.is_gpu: # gpu if not is_py3nvml_available(): logger.warning( "py3nvml not installed, we won't log GPU memory usage. " "Install py3nvml (pip install py3nvml) to log information about GPU." ) memory = "N/A" else: logger.info( "Measuring total GPU usage on GPU device. Make sure to not have additional processes" " running on the same GPU." ) # init nvml nvml.nvmlInit() func() handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) meminfo = nvml.nvmlDeviceGetMemoryInfo(handle) max_bytes_in_use = meminfo.used memory = Memory(max_bytes_in_use) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( "When enabling line by line tracing, the max peak memory for CPU is inaccurate in" " TensorFlow." ) memory = None else: memory_bytes = measure_peak_memory_cpu(func) memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes if self.args.trace_memory_line_by_line: summary = stop_memory_tracing(trace) if memory is None: memory = summary.total else: summary = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}") return "N/A", None
class_definition
2,522
13,245
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_tf.py
null
314
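A hedged usage sketch for TensorFlowBenchmark; `eager_mode` and `use_xla` are TensorFlowBenchmarkArguments fields referenced by the class above (training requires `eager_mode=False` per `_prepare_train_func`), and the model name and sizes are illustrative.

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["google-bert/bert-base-cased"],   # illustrative checkpoint
    batch_sizes=[8],
    sequence_lengths=[32, 128],
    eager_mode=False,                         # graph mode; required for the training benchmark
    use_xla=False,
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()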
class PyTorchBenchmark(Benchmark): args: PyTorchBenchmarkArguments configs: PretrainedConfig framework: str = "PyTorch" @property def framework_version(self): return torch.__version__ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_speed(_inference) def _inference_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_memory(_inference) def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_speed(_train) def _train_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_memory(_train) def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] if self.args.torchscript: config.torchscript = True has_model_class_in_config = ( hasattr(config, "architectures") and isinstance(config.architectures, list) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = config.architectures[0] transformers_module = __import__("transformers", fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" " set `--only_pretrain_model` or `args.only_pretrain_model=True`." 
) else: model = MODEL_MAPPING[config.__class__](config) model.eval() model.to(self.args.device) # encoder-decoder has vocab size saved differently vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device) if self.args.fp16: logger.info("Running training in Mixed Precision...") if not self.args.is_gpu: raise ValueError("Mixed precision is possible only for GPU.") # amp seems to have memory leaks so that memory usage # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439 model.half() if self.args.torchscript: with torch.no_grad(): inference_model = torch.jit.trace(model, input_ids) else: inference_model = model def encoder_decoder_forward(): with torch.no_grad(): outputs = inference_model(input_ids, decoder_input_ids=input_ids) return outputs def encoder_forward(): with torch.no_grad(): outputs = inference_model(input_ids) return outputs _forward = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _forward def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] has_model_class_in_config = ( hasattr(config, "architectures") and isinstance(config.architectures, list) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = config.architectures[0] transformers_module = __import__("transformers", fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" " set `--only_pretrain_model` or `args.only_pretrain_model=True`." ) else: model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config) if self.args.torchscript: raise NotImplementedError("Training for torchscript is currently not implemented") else: train_model = model model.train() model.to(self.args.device) # encoder-decoder has vocab size saved differently vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device) if self.args.fp16: logger.info("Running training in Mixed Precision...") if not self.args.is_gpu: raise ValueError("Mixed precision is possible only for GPU.") # amp seems to have memory leaks so that memory usage # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439 model.half() def compute_loss_and_backprob_encoder(): loss = train_model(input_ids, labels=input_ids)[0] loss.backward() return loss def compute_loss_and_backprob_encoder_decoder(): loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0] loss.backward() return loss _train = ( compute_loss_and_backprob_encoder_decoder if config.is_encoder_decoder else compute_loss_and_backprob_encoder ) return _train def _measure_speed(self, func) -> float: try: if self.args.is_tpu or self.args.torchscript: # run additional 10 times to stabilize compilation for tpu and torchscript logger.info("Do inference on TPU or torchscript. 
Running model 5 times to stabilize compilation") timeit.repeat( func, repeat=1, number=5, ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average runtimes = timeit.repeat( func, repeat=self.args.repeat, number=10, ) if self.args.is_tpu and self.args.torch_xla_tpu_print_metrics: import torch_xla.debug.metrics as met self.print_fn(met.metrics_report()) return min(runtimes) / 10.0 except RuntimeError as e: self.print_fn(f"Doesn't fit on GPU. {e}") return "N/A" def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]: try: if self.args.trace_memory_line_by_line: trace = start_memory_tracing("transformers") if self.args.is_tpu: # tpu raise NotImplementedError( "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with" " `--no-memory` or `args.memory=False`" ) elif self.args.is_gpu: if not is_py3nvml_available(): logger.warning( "py3nvml not installed, we won't log GPU memory usage. " "Install py3nvml (pip install py3nvml) to log information about GPU." ) memory = "N/A" else: logger.info( "Measuring total GPU usage on GPU device. Make sure to not have additional processes running" " on the same GPU." ) # init nvml nvml.nvmlInit() func() handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) meminfo = nvml.nvmlDeviceGetMemoryInfo(handle) max_bytes_in_use = meminfo.used memory = Memory(max_bytes_in_use) # shutdown nvml nvml.nvmlShutdown() else: # cpu memory_bytes = measure_peak_memory_cpu(func) memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes if self.args.trace_memory_line_by_line: summary = stop_memory_tracing(trace) else: summary = None return memory, summary except RuntimeError as e: self.print_fn(f"Doesn't fit on GPU. {e}") return "N/A", None
class_definition
1,365
10,746
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark.py
null
315
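A comparable sketch for PyTorchBenchmark, toggling the speed/memory/training flags defined on BenchmarkArguments further below; the argument values are illustrative.

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["google-bert/bert-base-cased"],   # illustrative checkpoint
    batch_sizes=[8],
    sequence_lengths=[32],
    training=True,                            # also benchmark a forward + backward pass
    speed=True,
    memory=True,
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()                     # BenchmarkOutput with time and memory result dicts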
class PyTorchBenchmarkArguments(BenchmarkArguments): deprecated_args = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__(self, **kwargs): """ This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be deleted """ for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: positive_arg = deprecated_arg[3:] setattr(self, positive_arg, not kwargs.pop(deprecated_arg)) logger.warning( f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or" f" {positive_arg}={kwargs[positive_arg]}" ) self.torchscript = kwargs.pop("torchscript", self.torchscript) self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics) self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level) super().__init__(**kwargs) torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"}) torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"}) fp16_opt_level: str = field( default="O1", metadata={ "help": ( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ) }, ) @cached_property def _setup_devices(self) -> Tuple["torch.device", int]: requires_backends(self, ["torch"]) logger.info("PyTorch: setting up devices") if not self.cuda: device = torch.device("cpu") n_gpu = 0 elif is_torch_xla_available(): device = xm.xla_device() n_gpu = 0 elif is_torch_xpu_available(): device = torch.device("xpu") n_gpu = torch.xpu.device_count() else: device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() return device, n_gpu @property def is_tpu(self): return is_torch_xla_available() and self.tpu @property def device_idx(self) -> int: requires_backends(self, ["torch"]) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def device(self) -> "torch.device": requires_backends(self, ["torch"]) return self._setup_devices[0] @property def n_gpu(self): requires_backends(self, ["torch"]) return self._setup_devices[1] @property def is_gpu(self): return self.n_gpu > 0
class_definition
1,120
4,049
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_args.py
null
316
class BenchmarkArguments: """ BenchMarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ models: List[str] = list_field( default=[], metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) }, ) batch_sizes: List[int] = list_field( default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) sequence_lengths: List[int] = list_field( default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, ) inference: bool = field( default=True, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, ) cuda: bool = field( default=True, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, ) tpu: bool = field( default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."}) training: bool = field(default=False, metadata={"help": "Benchmark training of model"}) verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"}) speed: bool = field( default=True, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, ) memory: bool = field( default=True, metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" }, ) trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"}) save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"}) log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"}) env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"}) multi_process: bool = field( default=True, metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) }, ) inference_time_csv_file: str = field( default=f"inference_time_{round(time())}.csv", metadata={"help": "CSV filename used if saving time results to csv."}, ) inference_memory_csv_file: str = field( default=f"inference_memory_{round(time())}.csv", metadata={"help": "CSV filename used if saving memory results to csv."}, ) train_time_csv_file: str = field( default=f"train_time_{round(time())}.csv", metadata={"help": "CSV filename used if saving time results to csv for training."}, ) train_memory_csv_file: str = field( default=f"train_memory_{round(time())}.csv", metadata={"help": "CSV filename used if saving memory results to csv for training."}, ) env_info_csv_file: str = field( default=f"env_info_{round(time())}.csv", metadata={"help": "CSV filename used if saving environment information."}, ) log_filename: str = field( default=f"log_{round(time())}.csv", metadata={"help": "Log filename used if print statements are saved in log."}, ) repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."}) only_pretrain_model: bool = field( default=False, metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) }, ) def __post_init__(self): warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" " are deprecated in general and it is advised to use external Benchmarking libraries " " to benchmark Transformer models.", FutureWarning, ) def to_json_string(self): """ Serializes this instance to a JSON string. """ return json.dumps(dataclasses.asdict(self), indent=2) @property def model_names(self) -> List[str]: if len(self.models) <= 0: raise ValueError( "Please make sure you provide at least one model name / model identifier, *e.g.* `--models" " google-bert/bert-base-cased` or `args.models = ['google-bert/bert-base-cased']." ) return self.models @property def do_multi_processing(self): if not self.multi_process: return False elif self.is_tpu: logger.info("Multiprocessing is currently not possible on TPU.") return False else: return True
class_definition
1,002
6,498
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_args_utils.py
null
317
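The BenchmarkArguments docstring above notes that `HfArgumentParser` can expose these fields on the command line. A minimal sketch of that pattern; the script name in the comment is hypothetical.

from transformers import HfArgumentParser, PyTorchBenchmark, PyTorchBenchmarkArguments

# e.g. python run_benchmark.py --models google-bert/bert-base-cased --batch_sizes 8 --sequence_lengths 32 128
parser = HfArgumentParser(PyTorchBenchmarkArguments)
benchmark_args = parser.parse_args_into_dataclasses()[0]
PyTorchBenchmark(args=benchmark_args).run()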
class Frame(NamedTuple): """ `Frame` is a NamedTuple used to gather the current frame state. `Frame` has the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script """ filename: str module: str line_number: int event: str line_text: str
class_definition
3,544
4,136
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_utils.py
null
318
class UsedMemoryState(NamedTuple): """ `UsedMemoryState` is a named tuple with the following fields: - 'frame': a `Frame` namedtuple (see above) storing information on the current tracing frame (current file, location in current file) - 'cpu_memory': CPU RSS memory state *before* executing the line - 'gpu_memory': GPU memory used *before* executing the line (sum over all GPUs, or only over `gpus_to_trace` if provided) """ frame: Frame cpu_memory: int gpu_memory: int
class_definition
4,139
4,676
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_utils.py
null
319
class Memory(NamedTuple): """ `Memory` is a NamedTuple with a single field `bytes`; calling `__repr__` returns a human-readable string with the number of megabytes. - `bytes` (integer): number of bytes """ bytes: int def __repr__(self) -> str: return str(bytes_to_mega_bytes(self.bytes))
class_definition
4,679
5,009
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_utils.py
null
320
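A tiny illustration of the `Memory.__repr__` conversion above; the import path follows the filepath column of this row and is otherwise an assumption.

from transformers.benchmark.benchmark_utils import Memory, bytes_to_mega_bytes

m = Memory(bytes=1_500_000_000)
print(m.bytes)                       # raw byte count
print(repr(m))                       # megabytes, via bytes_to_mega_bytes as in __repr__
print(bytes_to_mega_bytes(m.bytes))  # same conversion, called directly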
class MemoryState(NamedTuple): """ `MemoryState` is a namedtuple listing frame + CPU/GPU memory with the following fields: - `frame` (`Frame`): the current frame (see above) - `cpu`: CPU memory consumed during the current frame, as a `Memory` named tuple - `gpu`: GPU memory consumed during the current frame, as a `Memory` named tuple - `cpu_gpu`: CPU + GPU memory consumed during the current frame, as a `Memory` named tuple """ frame: Frame cpu: Memory gpu: Memory cpu_gpu: Memory
class_definition
5,012
5,563
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_utils.py
null
321
class MemorySummary(NamedTuple): """ `MemorySummary` is a namedtuple with the following fields: - `sequential`: a list of `MemoryState` namedtuples (see above) computed from the provided `memory_trace` by subtracting the memory after executing each line from the memory before executing it. - `cumulative`: a list of `MemoryState` namedtuples (see above) with the cumulative increase in memory for each line, obtained by summing the repeated memory increases of a line if it is executed several times. The list is sorted from the frame with the largest memory consumption to the frame with the smallest (which can be negative if memory is released). - `total`: total memory increase during the full tracing, as a `Memory` named tuple (see above). Lines that release memory (negative consumption) are ignored if `ignore_released_memory` is `True` (default). """ sequential: List[MemoryState] cumulative: List[MemoryState] current: List[MemoryState] total: Memory
class_definition
5,566
6,612
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_utils.py
null
322
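A hedged sketch of how a MemorySummary is typically produced with the tracing helpers used by the benchmark classes in this file; the traced workload is a placeholder.

from transformers.benchmark.benchmark_utils import start_memory_tracing, stop_memory_tracing

trace = start_memory_tracing("transformers")   # trace lines executed inside the transformers package
# ... run the workload to profile here ...
summary = stop_memory_tracing(trace)           # returns a MemorySummary
print(summary.total)                           # total memory increase over the trace
for state in summary.cumulative[:5]:           # largest cumulative per-line increases first
    print(state.frame.filename, state.frame.line_number, state.cpu_gpu)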
class MemoryMeasureProcess(Process): """ `MemoryMeasureProcess` inherits from `Process` and overwrites its `run()` method. Used to measure the memory usage of a process """ def __init__(self, process_id: int, child_connection: Connection, interval: float): super().__init__() self.process_id = process_id self.interval = interval self.connection = child_connection self.num_measurements = 1 self.mem_usage = get_cpu_memory(self.process_id) def run(self): self.connection.send(0) stop = False while True: self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id)) self.num_measurements += 1 if stop: break stop = self.connection.poll(self.interval) # send results to parent pipe self.connection.send(self.mem_usage) self.connection.send(self.num_measurements)
class_definition
8,480
9,622
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_utils.py
null
323
class Benchmark(ABC): """ Benchmarks is a simple but feature-complete benchmarking script to compare memory and time performance of models in Transformers. """ args: BenchmarkArguments configs: PretrainedConfig framework: str def __init__(self, args: BenchmarkArguments = None, configs: PretrainedConfig = None): self.args = args if configs is None: self.config_dict = { model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names } else: self.config_dict = dict(zip(self.args.model_names, configs)) warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" " are deprecated in general and it is advised to use external Benchmarking libraries " " to benchmark Transformer models.", FutureWarning, ) if self.args.memory and os.getenv("TRANSFORMERS_USE_MULTIPROCESSING") == 0: logger.warning( "Memory consumption will not be measured accurately if `args.multi_process` is set to `False.` The" " flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing." ) self._print_fn = None self._framework_version = None self._environment_info = None @property def print_fn(self): if self._print_fn is None: if self.args.log_print: def print_and_log(*args): with open(self.args.log_filename, "a") as log_file: log_file.write("".join(args) + "\n") print(*args) self._print_fn = print_and_log else: self._print_fn = print return self._print_fn @property @abstractmethod def framework_version(self): pass @abstractmethod def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: pass @abstractmethod def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: pass @abstractmethod def _inference_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: pass @abstractmethod def _train_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: pass def inference_speed(self, *args, **kwargs) -> float: return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs) def train_speed(self, *args, **kwargs) -> float: return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs) def inference_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]: return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs) def train_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]: return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs) def run(self): result_dict = {model_name: {} for model_name in self.args.model_names} inference_result_time = copy.deepcopy(result_dict) inference_result_memory = copy.deepcopy(result_dict) train_result_time = copy.deepcopy(result_dict) train_result_memory = copy.deepcopy(result_dict) for c, model_name in enumerate(self.args.model_names): self.print_fn(f"{c + 1} / {len(self.args.model_names)}") model_dict = { "bs": self.args.batch_sizes, "ss": self.args.sequence_lengths, "result": {i: {} for i in self.args.batch_sizes}, } inference_result_time[model_name] = copy.deepcopy(model_dict) inference_result_memory[model_name] = copy.deepcopy(model_dict) train_result_time[model_name] = copy.deepcopy(model_dict) train_result_memory[model_name] = copy.deepcopy(model_dict) inference_summary = train_summary = None for batch_size in self.args.batch_sizes: for sequence_length in 
self.args.sequence_lengths: if self.args.inference: if self.args.memory: memory, inference_summary = self.inference_memory(model_name, batch_size, sequence_length) inference_result_memory[model_name]["result"][batch_size][sequence_length] = memory if self.args.speed: time = self.inference_speed(model_name, batch_size, sequence_length) inference_result_time[model_name]["result"][batch_size][sequence_length] = time if self.args.training: if self.args.memory: memory, train_summary = self.train_memory(model_name, batch_size, sequence_length) train_result_memory[model_name]["result"][batch_size][sequence_length] = memory if self.args.speed: time = self.train_speed(model_name, batch_size, sequence_length) train_result_time[model_name]["result"][batch_size][sequence_length] = time if self.args.inference: if self.args.speed: self.print_fn("\n" + 20 * "=" + ("INFERENCE - SPEED - RESULTS").center(40) + 20 * "=") self.print_results(inference_result_time, type_label="Time in s") self.save_to_csv(inference_result_time, self.args.inference_time_csv_file) if self.args.is_tpu: self.print_fn( "TPU was used for inference. Note that the time after compilation stabilized (after ~10" " model.forward(...) calls) was measured." ) if self.args.memory: self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - RESULTS").center(40) + 20 * "=") self.print_results(inference_result_memory, type_label="Memory in MB") self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file) if self.args.trace_memory_line_by_line: self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - LINE BY LINE - SUMMARY").center(40) + 20 * "=") self.print_memory_trace_statistics(inference_summary) if self.args.training: if self.args.speed: self.print_fn("\n" + 20 * "=" + ("TRAIN - SPEED - RESULTS").center(40) + 20 * "=") self.print_results(train_result_time, "Time in s") self.save_to_csv(train_result_time, self.args.train_time_csv_file) if self.args.is_tpu: self.print_fn( "TPU was used for training. Note that the time after compilation stabilized (after ~10 train" " loss=model.forward(...) + loss.backward() calls) was measured." 
) if self.args.memory: self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - RESULTS").center(40) + 20 * "=") self.print_results(train_result_memory, type_label="Memory in MB") self.save_to_csv(train_result_memory, self.args.train_memory_csv_file) if self.args.trace_memory_line_by_line: self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - LINE BY LINE - SUMMARY").center(40) + 20 * "=") self.print_memory_trace_statistics(train_summary) if self.args.env_print: self.print_fn("\n" + 20 * "=" + ("ENVIRONMENT INFORMATION").center(40) + 20 * "=") self.print_fn("\n".join([f"- {prop}: {val}" for prop, val in self.environment_info.items()]) + "\n") if self.args.save_to_csv: with open(self.args.env_info_csv_file, mode="w", newline="") as csv_file: writer = csv.writer(csv_file) for key, value in self.environment_info.items(): writer.writerow([key, value]) return BenchmarkOutput( inference_result_time, inference_result_memory, train_result_time, train_result_memory, inference_summary, train_summary, ) @property def environment_info(self): if self._environment_info is None: info = {} info["transformers_version"] = version info["framework"] = self.framework if self.framework == "PyTorch": info["use_torchscript"] = self.args.torchscript if self.framework == "TensorFlow": info["eager_mode"] = self.args.eager_mode info["use_xla"] = self.args.use_xla info["framework_version"] = self.framework_version info["python_version"] = platform.python_version() info["system"] = platform.system() info["cpu"] = platform.processor() info["architecture"] = platform.architecture()[0] info["date"] = datetime.date(datetime.now()) info["time"] = datetime.time(datetime.now()) info["fp16"] = self.args.fp16 info["use_multiprocessing"] = self.args.do_multi_processing info["only_pretrain_model"] = self.args.only_pretrain_model if is_psutil_available(): info["cpu_ram_mb"] = bytes_to_mega_bytes(psutil.virtual_memory().total) else: logger.warning( "psutil not installed, we won't log available CPU memory. " "Install psutil (pip install psutil) to log available CPU memory." ) info["cpu_ram_mb"] = "N/A" info["use_gpu"] = self.args.is_gpu if self.args.is_gpu: info["num_gpus"] = 1 # TODO(PVP) Currently only single GPU is supported if is_py3nvml_available(): nvml.nvmlInit() handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) info["gpu"] = nvml.nvmlDeviceGetName(handle) info["gpu_ram_mb"] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total) info["gpu_power_watts"] = nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000 info["gpu_performance_state"] = nvml.nvmlDeviceGetPerformanceState(handle) nvml.nvmlShutdown() else: logger.warning( "py3nvml not installed, we won't log GPU memory usage. " "Install py3nvml (pip install py3nvml) to log information about GPU." 
) info["gpu"] = "N/A" info["gpu_ram_mb"] = "N/A" info["gpu_power_watts"] = "N/A" info["gpu_performance_state"] = "N/A" info["use_tpu"] = self.args.is_tpu # TODO(PVP): See if we can add more information about TPU # see: https://github.com/pytorch/xla/issues/2180 self._environment_info = info return self._environment_info def print_results(self, result_dict, type_label): self.print_fn(80 * "-") self.print_fn( "Model Name".center(30) + "Batch Size".center(15) + "Seq Length".center(15) + type_label.center(15) ) self.print_fn(80 * "-") for model_name in self.args.model_names: for batch_size in result_dict[model_name]["bs"]: for sequence_length in result_dict[model_name]["ss"]: result = result_dict[model_name]["result"][batch_size][sequence_length] if isinstance(result, float): result = round(1000 * result) / 1000 result = "< 0.001" if result == 0.0 else str(result) else: result = str(result) self.print_fn( model_name[:30].center(30) + str(batch_size).center(15), str(sequence_length).center(15), result.center(15), ) self.print_fn(80 * "-") def print_memory_trace_statistics(self, summary: MemorySummary): self.print_fn( "\nLine by line memory consumption:\n" + "\n".join( f"{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}" for state in summary.sequential ) ) self.print_fn( "\nLines with top memory consumption:\n" + "\n".join( f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}" for state in summary.cumulative[:6] ) ) self.print_fn( "\nLines with lowest memory consumption:\n" + "\n".join( f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}" for state in summary.cumulative[-6:] ) ) self.print_fn(f"\nTotal memory increase: {summary.total}") def save_to_csv(self, result_dict, filename): if not self.args.save_to_csv: return self.print_fn("Saving results to csv.") with open(filename, mode="w") as csv_file: if len(self.args.model_names) <= 0: raise ValueError(f"At least 1 model should be defined, but got {self.model_names}") fieldnames = ["model", "batch_size", "sequence_length"] writer = csv.DictWriter(csv_file, fieldnames=fieldnames + ["result"]) writer.writeheader() for model_name in self.args.model_names: result_dict_model = result_dict[model_name]["result"] for bs in result_dict_model: for ss in result_dict_model[bs]: result_model = result_dict_model[bs][ss] writer.writerow( { "model": model_name, "batch_size": bs, "sequence_length": ss, "result": ("{}" if not isinstance(result_model, float) else "{:.4f}").format( result_model ), } )
class_definition
22,684
37,598
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_utils.py
null
324
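Usage note (illustrative, not part of the extracted source): the abstract class above is driven through its framework-specific subclasses. The sketch below assumes the deprecated PyTorchBenchmark / PyTorchBenchmarkArguments pair that ships alongside this base class, a PyTorch install, and the BenchmarkOutput field names as recalled here; treat those names as assumptions rather than guarantees.

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

# Sweep one example checkpoint over a small grid of batch sizes and sequence lengths.
args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"],   # example checkpoint, downloaded on first use
    batch_sizes=[8],
    sequence_lengths=[32, 128],
    inference=True,
    training=False,
    speed=True,
    memory=True,
)

benchmark = PyTorchBenchmark(args)
result = benchmark.run()  # drives the abstract _inference_* / _train_* hooks per model / batch size / length
print(result.time_inference_result)    # nested dict: model -> {"bs", "ss", "result"}
print(result.memory_inference_result)

Running this also triggers the FutureWarning emitted in __init__ above, since the whole benchmarking suite is deprecated.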
class TensorFlowBenchmarkArguments(BenchmarkArguments): deprecated_args = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__(self, **kwargs): """ This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be deleted. """ for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: positive_arg = deprecated_arg[3:] kwargs[positive_arg] = not kwargs.pop(deprecated_arg) logger.warning( f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or" f" {positive_arg}={kwargs[positive_arg]}" ) self.tpu_name = kwargs.pop("tpu_name", self.tpu_name) self.device_idx = kwargs.pop("device_idx", self.device_idx) self.eager_mode = kwargs.pop("eager_mode", self.eager_mode) self.use_xla = kwargs.pop("use_xla", self.use_xla) super().__init__(**kwargs) tpu_name: str = field( default=None, metadata={"help": "Name of TPU"}, ) device_idx: int = field( default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."}, ) eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."}) use_xla: bool = field( default=False, metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`." }, ) @cached_property def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self, ["tf"]) tpu = None if self.tpu: try: if self.tpu_name: tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name) else: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: tpu = None return tpu @cached_property def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self, ["tf"]) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu) strategy = tf.distribute.TPUStrategy(self._setup_tpu) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU") strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}") else: tf.config.set_visible_devices([], "GPU") # disable GPU strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}") return strategy @property def is_tpu(self) -> bool: requires_backends(self, ["tf"]) return self._setup_tpu is not None @property def strategy(self) -> "tf.distribute.Strategy": requires_backends(self, ["tf"]) return self._setup_strategy @property def gpu_list(self): requires_backends(self, ["tf"]) return tf.config.list_physical_devices("GPU") @property def n_gpu(self) -> int: requires_backends(self, ["tf"]) if self.cuda: return len(self.gpu_list) return 0 @property def is_gpu(self) -> bool: return self.n_gpu > 0
class_definition
976
4,734
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/benchmark/benchmark_args_tf.py
null
325
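Illustrative sketch (not from the source): constructing the TensorFlow arguments above, including one of the legacy negative flags that the legacy __init__ rewrites into its positive counterpart. TensorFlowBenchmark is assumed to be the companion runner exported next to this class; a TensorFlow install is required.

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[8],
    sequence_lengths=[32],
    eager_mode=False,   # graph mode; the help text above notes eager_mode must be False for XLA
    use_xla=True,
    device_idx=0,
    no_memory=True,     # deprecated spelling, rewritten to memory=False with a warning
)

benchmark = TensorFlowBenchmark(args)
results = benchmark.run()

On a machine without a GPU or TPU, _setup_strategy above falls back to a OneDeviceStrategy pinned to the CPU.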
class SelectiveScanFn(torch.autograd.Function): @staticmethod def forward( ctx, u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False, return_last_state=False ): if u.stride(-1) != 1: u = u.contiguous() if delta.stride(-1) != 1: delta = delta.contiguous() if D is not None: D = D.contiguous() if B.stride(-1) != 1: B = B.contiguous() if C.stride(-1) != 1: C = C.contiguous() if z is not None and z.stride(-1) != 1: z = z.contiguous() if B.dim() == 3: B = rearrange(B, "b dstate l -> b 1 dstate l") ctx.squeeze_B = True if C.dim() == 3: C = rearrange(C, "b dstate l -> b 1 dstate l") ctx.squeeze_C = True out, x, *rest = selective_scan_cuda.fwd(u, delta, A, B, C, D, z, delta_bias, delta_softplus) ctx.delta_softplus = delta_softplus ctx.has_z = z is not None last_state = x[:, :, -1, 1::2] # (batch, dim, dstate) if not ctx.has_z: ctx.save_for_backward(u, delta, A, B, C, D, delta_bias, x) return out if not return_last_state else (out, last_state) else: ctx.save_for_backward(u, delta, A, B, C, D, z, delta_bias, x, out) out_z = rest[0] return out_z if not return_last_state else (out_z, last_state) @staticmethod def backward(ctx, dout, *args): if not ctx.has_z: u, delta, A, B, C, D, delta_bias, x = ctx.saved_tensors z = None out = None else: u, delta, A, B, C, D, z, delta_bias, x, out = ctx.saved_tensors if dout.stride(-1) != 1: dout = dout.contiguous() # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the # backward of selective_scan_cuda with the backward of chunk). # Here we just pass in None and dz will be allocated in the C++ code. du, ddelta, dA, dB, dC, dD, ddelta_bias, *rest = selective_scan_cuda.bwd( u, delta, A, B, C, D, z, delta_bias, dout, x, out, None, ctx.delta_softplus, False, # option to recompute out_z, not used here ) dz = rest[0] if ctx.has_z else None dB = dB.squeeze(1) if getattr(ctx, "squeeze_B", False) else dB dC = dC.squeeze(1) if getattr(ctx, "squeeze_C", False) else dC return ( du, ddelta, dA, dB, dC, dD if D is not None else None, dz, ddelta_bias if delta_bias is not None else None, None, None, )
class_definition
1,336
4,206
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/kernels/falcon_mamba/selective_scan_with_ln_interface.py
null
326
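For readers without the compiled extension, here is a naive pure-PyTorch reference of the recurrence that selective_scan_cuda.fwd evaluates, mirroring the reference implementation published with the Mamba kernels. It is a sketch for clarity, not the fused kernel: it ignores complex-valued A, the 4-D B/C variant, and return_last_state. Shapes assumed: u, delta, z of (batch, dim, seqlen); A of (dim, dstate); B, C of (batch, dstate, seqlen); D and delta_bias of (dim,).

import torch
import torch.nn.functional as F

def selective_scan_ref(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False):
    # Discretize the continuous state-space parameters with the per-token step size delta.
    if delta_bias is not None:
        delta = delta + delta_bias[..., None]
    if delta_softplus:
        delta = F.softplus(delta)
    batch, dim, seqlen = u.shape
    dstate = A.shape[1]
    deltaA = torch.exp(torch.einsum("bdl,dn->bdln", delta, A))   # state transition per step
    deltaB_u = torch.einsum("bdl,bnl,bdl->bdln", delta, B, u)    # input contribution per step
    x = A.new_zeros(batch, dim, dstate)                          # hidden state
    ys = []
    for i in range(seqlen):
        x = deltaA[:, :, i] * x + deltaB_u[:, :, i]              # sequential scan
        ys.append(torch.einsum("bdn,bn->bd", x, C[:, :, i]))     # readout
    y = torch.stack(ys, dim=2)                                   # (batch, dim, seqlen)
    out = y if D is None else y + u * D[..., None]               # skip connection
    return out * F.silu(z) if z is not None else out             # optional gating, as in the fused path

# Tiny smoke test on CPU with random tensors.
b, d, n, L = 2, 4, 8, 16
out = selective_scan_ref(
    torch.randn(b, d, L), torch.randn(b, d, L), -torch.rand(d, n),
    torch.randn(b, n, L), torch.randn(b, n, L),
    D=torch.randn(d), z=torch.randn(b, d, L), delta_softplus=True,
)

The autograd.Function above exists precisely to avoid this Python-level loop: the CUDA kernel fuses the scan and saves the intermediates needed by its hand-written backward.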
class MambaInnerFn(torch.autograd.Function): @staticmethod @custom_fwd def forward( ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, out_proj_weight, out_proj_bias, A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1, b_rms_weight=None, c_rms_weight=None, dt_rms_weight=None, b_c_dt_rms_eps=1e-6, ): """ xz: (batch, dim, seqlen) """ assert causal_conv1d_cuda is not None, "causal_conv1d_cuda is not available. Please install causal-conv1d." assert checkpoint_lvl in [0, 1] L = xz.shape[-1] delta_rank = delta_proj_weight.shape[1] d_state = A.shape[-1] * (1 if not A.is_complex() else 2) if torch.is_autocast_enabled(): x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) out_proj_bias = ( out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype()) if out_proj_bias is not None else None ) if xz.stride(-1) != 1: xz = xz.contiguous() conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w") x, z = xz.chunk(2, dim=1) conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, None, None, None, True) # We're being very careful here about the layout, to avoid extra transposes. # We want delta to have d as the slowest moving dimension # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. x_dbl = F.linear(rearrange(conv1d_out, "b d l -> (b l) d"), x_proj_weight) # (bl d) delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l=L) ctx.is_variable_B = B is None ctx.is_variable_C = C is None ctx.B_proj_bias_is_None = B_proj_bias is None ctx.C_proj_bias_is_None = C_proj_bias is None if B is None: # variable B B = x_dbl[:, delta_rank : delta_rank + d_state] # (bl dstate) if B_proj_bias is not None: B = B + B_proj_bias.to(dtype=B.dtype) if not A.is_complex(): # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous() B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() else: B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() else: if B.stride(-1) != 1: B = B.contiguous() if C is None: # variable C C = x_dbl[:, -d_state:] # (bl dstate) if C_proj_bias is not None: C = C + C_proj_bias.to(dtype=C.dtype) if not A.is_complex(): # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous() C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() else: C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous() else: if C.stride(-1) != 1: C = C.contiguous() if D is not None: D = D.contiguous() if b_rms_weight is not None: B = rearrange(B, "b 1 dstate l -> (b l) dstate", l=L).contiguous() B = rms_norm_forward(B, b_rms_weight, bias=None, eps=b_c_dt_rms_eps) B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() if c_rms_weight is not None: C = rearrange(C, "b 1 dstate l -> (b l) dstate", l=L).contiguous() C = rms_norm_forward(C, c_rms_weight, bias=None, eps=b_c_dt_rms_eps) C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() if dt_rms_weight is not None: delta = rearrange(delta, "b d l -> (b l) d", l=L).contiguous() delta = rms_norm_forward(delta, dt_rms_weight, bias=None, eps=b_c_dt_rms_eps) delta = rearrange(delta, "(b l) d -> b d l", l=L).contiguous() out, scan_intermediates, out_z = 
selective_scan_cuda.fwd( conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus ) ctx.delta_softplus = delta_softplus ctx.out_proj_bias_is_None = out_proj_bias is None ctx.checkpoint_lvl = checkpoint_lvl ctx.b_rms_weight = b_rms_weight ctx.c_rms_weight = c_rms_weight ctx.dt_rms_weight = dt_rms_weight ctx.b_c_dt_rms_eps = b_c_dt_rms_eps if checkpoint_lvl >= 1: # Will recompute conv1d_out and delta in the backward pass conv1d_out, delta = None, None ctx.save_for_backward( xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight, conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, b_rms_weight, c_rms_weight, dt_rms_weight, out, ) return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) @staticmethod @custom_bwd def backward(ctx, dout): # dout: (batch, seqlen, dim) assert causal_conv1d_cuda is not None, "causal_conv1d_cuda is not available. Please install causal-conv1d." ( xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight, conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, b_rms_weight, c_rms_weight, dt_rms_weight, out, ) = ctx.saved_tensors L = xz.shape[-1] delta_rank = delta_proj_weight.shape[1] d_state = A.shape[-1] * (1 if not A.is_complex() else 2) x, z = xz.chunk(2, dim=1) if dout.stride(-1) != 1: dout = dout.contiguous() if ctx.checkpoint_lvl == 1: conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, None, None, None, True) delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l=L) if dt_rms_weight is not None: delta = rearrange(delta, "b d l -> (b l) d", l=L).contiguous() delta = rms_norm_forward(delta, ctx.dt_rms_weight, None, ctx.b_c_dt_rms_eps) delta = rearrange(delta, "(b l) d -> b d l", l=L).contiguous() if b_rms_weight is not None: # Recompute & RMSNorm B B = rearrange(B, "b 1 dstate l -> (b l) dstate", l=L).contiguous() B = rms_norm_forward(B, ctx.b_rms_weight, None, ctx.b_c_dt_rms_eps) B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous() if c_rms_weight is not None: # Recompute & RMSNorm C C = rearrange(C, "b 1 dstate l -> (b l) dstate", l=L).contiguous() C = rms_norm_forward(C, ctx.c_rms_weight, None, ctx.b_c_dt_rms_eps) C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous() # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the # backward of selective_scan_cuda with the backward of chunk). 
dxz = torch.empty_like(xz) # (batch, dim, seqlen) dx, dz = dxz.chunk(2, dim=1) dout = rearrange(dout, "b l e -> e (b l)") dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L) dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd( conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates, out, dz, ctx.delta_softplus, True, # option to recompute out_z ) dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)")) dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None dD = dD if D is not None else None dx_dbl = torch.empty_like(x_dbl) dB_proj_bias = None if ctx.is_variable_B: if not A.is_complex(): dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous() else: dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None dx_dbl[:, delta_rank : delta_rank + d_state] = dB # (bl d) dB = None dC_proj_bias = None if ctx.is_variable_C: if not A.is_complex(): dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous() else: dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous() dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None dx_dbl[:, -d_state:] = dC # (bl d) dC = None ddelta = rearrange(ddelta, "b d l -> d (b l)") ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank]) dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight) dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)") dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d")) dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1]) # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the # backward of conv1d with the backward of chunk). dx, dconv1d_weight, dconv1d_bias, *_ = causal_conv1d_cuda.causal_conv1d_bwd( x, conv1d_weight, conv1d_bias, dconv1d_out, None, None, None, dx, False, True ) dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w") return ( dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, dout_proj_weight, dout_proj_bias, dA, dB, dC, dD, ddelta_bias if delta_bias is not None else None, # 6-None are delta_softplus, checkpoint_lvl, b_rms_weight, c_rms_weight, dt_rms_weight, b_c_dt_rms_eps dB_proj_bias, dC_proj_bias, None, None, None, None, None, None, )
class_definition
7,500
18,893
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/kernels/falcon_mamba/selective_scan_with_ln_interface.py
null
327
class PatchingSpec: """ Data class that holds patching specifications. Args: o: Module / object where the op to patch is located name: Name of the op to monkey patch custom_op: Custom op that patches the original op orig_op: Original op that is being patched op_wrapper: Wrapper (optional) that wraps both the original and custom ops. It is useful for ops that are class or static methods for instance. """ o: Any name: str custom_op: Callable orig_op: Optional[Callable] = None op_wrapper: Optional[Callable] = None
class_definition
1,543
2,149
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/onnx/config.py
null
328
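A small illustrative sketch of how such a spec is meant to be consumed. The import path is inferred from the filepath recorded above, PatchingSpec is assumed to be a dataclass (its docstring calls it a data class), and the patched op here is purely hypothetical.

import math

from transformers.onnx.config import PatchingSpec  # path assumed from the filepath above

# Hypothetical patch: swap math.sqrt for a tracing-friendly equivalent during export.
spec = PatchingSpec(o=math, name="sqrt", custom_op=lambda x: x**0.5)

# OnnxConfig (defined next) stores a list of such specs: patch_ops() does
# setattr(spec.o, spec.name, spec.custom_op) and restore_ops() puts spec.orig_op back.
# Leaving orig_op=None lets OnnxConfig capture the original op automatically.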
class OnnxConfig(ABC): """ Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format. """ default_fixed_batch = 2 default_fixed_sequence = 8 default_fixed_num_choices = 4 torch_onnx_minimum_version = version.parse("1.8") _tasks_to_common_outputs = { "causal-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "default": OrderedDict({"last_hidden_state": {0: "batch", 1: "sequence"}}), "image-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "image-segmentation": OrderedDict( { "logits": {0: "batch", 1: "sequence"}, "pred_boxes": {0: "batch", 1: "sequence"}, "pred_masks": {0: "batch", 1: "sequence"}, } ), "masked-im": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "masked-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "multiple-choice": OrderedDict({"logits": {0: "batch"}}), "object-detection": OrderedDict( { "logits": {0: "batch", 1: "sequence"}, "pred_boxes": {0: "batch", 1: "sequence"}, } ), "question-answering": OrderedDict( { "start_logits": {0: "batch", 1: "sequence"}, "end_logits": {0: "batch", 1: "sequence"}, } ), "semantic-segmentation": OrderedDict({"logits": {0: "batch", 1: "num_labels", 2: "height", 3: "width"}}), "seq2seq-lm": OrderedDict({"logits": {0: "batch", 1: "decoder_sequence"}}), "sequence-classification": OrderedDict({"logits": {0: "batch"}}), "token-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "vision2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), "speech2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), } def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: List[PatchingSpec] = None): self._config = config if task not in self._tasks_to_common_outputs: raise ValueError( f"{task} is not a supported task, supported tasks: {self._tasks_to_common_outputs.keys()}" ) self.task = task self._patching_specs = [] for spec in patching_specs if patching_specs is not None else []: final_spec = spec if spec.orig_op is None: final_spec = dataclasses.replace(spec, orig_op=getattr(spec.o, spec.name)) self._patching_specs.append(final_spec) @classmethod def from_model_config(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfig": """ Instantiate a OnnxConfig for a specific model Args: config: The model's configuration to use when exporting to ONNX Returns: OnnxConfig for this model """ return cls(config, task=task) @property @abstractmethod def inputs(self) -> Mapping[str, Mapping[int, str]]: """ Mapping containing the axis definition of the input tensors to provide to the model Returns: For each input: its name associated to the axes symbolic name and the axis position within the tensor """ raise NotImplementedError() @property def outputs(self) -> Mapping[str, Mapping[int, str]]: """ Mapping containing the axis definition of the output tensors to provide to the model Returns: For each output: its name associated to the axes symbolic name and the axis position within the tensor """ common_outputs = self._tasks_to_common_outputs[self.task] return copy.deepcopy(common_outputs) @property def values_override(self) -> Optional[Mapping[str, Any]]: """ Dictionary of keys to override in the model's config before exporting Returns: Dictionary with the keys (and their corresponding values) to override """ if hasattr(self._config, "use_cache"): return {"use_cache": False} return None @property def default_batch_size(self) -> int: """ The default batch size to use if no other indication Returns: 
Integer > 0 """ # Using 2 avoid ONNX making assumption about single sample batch return OnnxConfig.default_fixed_batch @property def default_sequence_length(self) -> int: """ The default sequence length to use if no other indication Returns: Integer > 0 """ return OnnxConfig.default_fixed_sequence @property def default_num_choices(self) -> int: """ The default number of choices to use if no other indication Returns: Integer > 0 """ return OnnxConfig.default_fixed_num_choices @property def default_onnx_opset(self) -> int: """ Which onnx opset to use when exporting the model Returns: Integer ONNX Opset version """ return DEFAULT_ONNX_OPSET @property def atol_for_validation(self) -> float: """ What absolute tolerance value to use during model conversion validation. Returns: Float absolute tolerance value. """ return 1e-5 @property def is_torch_support_available(self) -> bool: """ The minimum PyTorch version required to export the model. Returns: `bool`: Whether the installed version of PyTorch is compatible with the model. """ if is_torch_available(): from transformers.utils import get_torch_version return version.parse(get_torch_version()) >= self.torch_onnx_minimum_version else: return False @staticmethod def use_external_data_format(num_parameters: int) -> bool: """ Flag indicating if the model requires using external data format Args: num_parameters: Number of parameter on the model Returns: True if model.num_parameters() * size_of(float32) >= 2Gb False otherwise """ return ( compute_serialized_parameters_size(num_parameters, ParameterFormat.Float) >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT ) def _generate_dummy_images( self, batch_size: int = 2, num_channels: int = 3, image_height: int = 40, image_width: int = 40 ): images = [] for _ in range(batch_size): data = np.random.rand(image_height, image_width, num_channels) * 255 images.append(Image.fromarray(data.astype("uint8")).convert("RGB")) return images def _generate_dummy_audio( self, batch_size: int = 2, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220 ): audio_data = [] for _ in range(batch_size): # time variable t = np.linspace(0, time_duration, int(time_duration * sampling_rate), endpoint=False) # generate pure sine wave at `frequency` Hz audio_data.append(0.5 * np.sin(2 * np.pi * frequency * t)) return audio_data def generate_dummy_inputs( self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin", "ImageProcessingMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220, tokenizer: "PreTrainedTokenizerBase" = None, ) -> Mapping[str, Any]: """ Generate inputs to provide to the ONNX exporter for the specific framework Args: preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]): The preprocessor associated with this model configuration. batch_size (`int`, *optional*, defaults to -1): The batch size to export the model for (-1 means dynamic axis). num_choices (`int`, *optional*, defaults to -1): The number of candidate answers provided for multiple choice task (-1 means dynamic axis). seq_length (`int`, *optional*, defaults to -1): The sequence length to export the model for (-1 means dynamic axis). 
is_pair (`bool`, *optional*, defaults to `False`): Indicate if the input is a pair (sentence 1, sentence 2) framework (`TensorType`, *optional*, defaults to `None`): The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for. num_channels (`int`, *optional*, defaults to 3): The number of channels of the generated images. image_width (`int`, *optional*, defaults to 40): The width of the generated images. image_height (`int`, *optional*, defaults to 40): The height of the generated images. sampling_rate (`int`, *optional* defaults to 22050) The sampling rate for audio data generation. time_duration (`float`, *optional* defaults to 5.0) Total seconds of sampling for audio data generation. frequency (`int`, *optional* defaults to 220) The desired natural frequency of generated audio. Returns: Mapping[str, Tensor] holding the kwargs to provide to the model's forward function """ from ..feature_extraction_utils import FeatureExtractionMixin from ..image_processing_utils import ImageProcessingMixin from ..tokenization_utils_base import PreTrainedTokenizerBase if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.warning("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") preprocessor = tokenizer if isinstance(preprocessor, PreTrainedTokenizerBase): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = preprocessor.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence input_token = ( preprocessor.unk_token if (preprocessor.unk_token is not None and len(preprocessor.unk_token) > 0) else "0" ) dummy_input = [" ".join([input_token]) * seq_length] * batch_size if self.task == "multiple-choice": # If dynamic axis (-1) we forward with a fixed dimension of 4 candidate answers to avoid optimizations # made by ONNX num_choices = compute_effective_axis_dimension( num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0 ) dummy_input = dummy_input * num_choices # The shape of the tokenized inputs values is [batch_size * num_choices, seq_length] tokenized_input = preprocessor(dummy_input, text_pair=dummy_input) # Unflatten the tokenized inputs values expanding it to the shape [batch_size, num_choices, seq_length] for k, v in tokenized_input.items(): tokenized_input[k] = [v[i : i + num_choices] for i in range(0, len(v), num_choices)] return dict(tokenized_input.convert_to_tensors(tensor_type=framework)) return dict(preprocessor(dummy_input, return_tensors=framework)) elif isinstance(preprocessor, ImageProcessingMixin): if preprocessor.model_input_names[0] != "pixel_values": raise ValueError( f"The `preprocessor` is an image processor ({preprocessor.__class__.__name__}) and expects" f' `model_input_names[0]` to be "pixel_values", but got 
{preprocessor.model_input_names[0]}' ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) return dict(preprocessor(images=dummy_input, return_tensors=framework)) elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) return dict(preprocessor(images=dummy_input, return_tensors=framework)) elif ( isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "input_features" ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_audio(batch_size, sampling_rate, time_duration, frequency) return dict(preprocessor(dummy_input, return_tensors=framework)) else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." ) def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]: """ Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq models which have the encoder and decoder exported as separate ONNX files. Args: reference_model_inputs ([`Mapping[str, Tensor]`): Reference inputs for the model. Returns: `Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function """ return reference_model_inputs def patch_ops(self): for spec in self._patching_specs: custom_op = spec.custom_op if spec.op_wrapper is None else spec.op_wrapper(spec.custom_op) setattr(spec.o, spec.name, custom_op) def restore_ops(self): for spec in self._patching_specs: orig_op = spec.orig_op if spec.op_wrapper is None else spec.op_wrapper(spec.orig_op) setattr(spec.o, spec.name, orig_op) @classmethod def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]: """ Flatten any potential nested structure expanding the name of the field with the index of the element within the structure. Args: name: The name of the nested structure field: The structure to, potentially, be flattened Returns: (Dict[str, Any]): Outputs with flattened structure and key mapping this new structure. """ from itertools import chain return {f"{name}.{idx}": item for idx, item in enumerate(chain.from_iterable(field))}
class_definition
2,152
18,850
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/onnx/config.py
null
329
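An illustrative sketch of subclassing OnnxConfig: only the abstract inputs property is mandatory, after which generate_dummy_inputs can build exporter inputs from a tokenizer. The subclass is hypothetical (the library already ships per-model configs), the checkpoint name is just an example that is downloaded on first use, and a PyTorch install is assumed for TensorType.PYTORCH.

from collections import OrderedDict
from typing import Mapping

from transformers import AutoConfig, AutoTokenizer, TensorType
from transformers.onnx import OnnxConfig


class MyTextModelOnnxConfig(OnnxConfig):
    # Hypothetical minimal subclass: declare the dynamic axes of each model input.
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )


config = AutoConfig.from_pretrained("distilbert-base-uncased")
onnx_config = MyTextModelOnnxConfig(config, task="sequence-classification")

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
# batch_size and seq_length default to -1 (dynamic), so the fixed fallbacks (2 samples, 8 tokens
# plus special tokens) defined above are used instead.
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print(onnx_config.outputs)     # {"logits": {0: "batch"}} for the sequence-classification task
print(dummy_inputs.keys())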
class OnnxConfigWithPast(OnnxConfig, ABC): def __init__( self, config: "PretrainedConfig", task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ): super().__init__(config, task=task, patching_specs=patching_specs) self.use_past = use_past @classmethod def with_past(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfigWithPast": """ Instantiate a OnnxConfig with `use_past` attribute set to True Args: config: The underlying model's config to use when exporting to ONNX Returns: OnnxConfig with `.use_past = True` """ return cls(config, task=task, use_past=True) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: common_outputs = super().outputs if self.use_past: self.fill_with_past_key_values_(common_outputs, direction="outputs") return common_outputs @property def values_override(self) -> Optional[Mapping[str, Any]]: if hasattr(self._config, "use_cache"): return {"use_cache": self.use_past} return None @property def num_layers(self) -> int: """ The number of layers attribute retrieved from the model config. Override this for model configs where the number of layers attribute is not called `num_layers`. """ if not hasattr(self._config, "num_layers"): raise AttributeError( "could not find the number of layers attribute in the model configuration, override the num_layers" " property of the model OnnxConfig to solve this" ) return self._config.num_layers @property def num_attention_heads(self) -> int: """ The number of attention heads attribute retrieved from the model config. Override this for model configs where the number of attention heads attribute is not called `num_attention_heads`. """ if not hasattr(self._config, "num_attention_heads"): raise AttributeError( "could not find the number of attention heads attribute in the model configuration, override the" " num_attention_heads property of the model OnnxConfig to solve this" ) return self._config.num_attention_heads def generate_dummy_inputs( self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: # TODO: should we set seq_length = 1 when self.use_past = True? common_inputs = super().generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, seqlen = common_inputs["input_ids"].shape # Not using the same length for past_key_values past_key_values_length = seqlen + 2 shape = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) if "attention_mask" in common_inputs: mask_dtype = common_inputs["attention_mask"].dtype common_inputs["attention_mask"] = torch.cat( [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1, ) common_inputs["past_key_values"] = [] for _ in range(self.num_layers): common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def fill_with_past_key_values_( self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool = False ): """ Fill the input_or_outputs mapping with past_key_values dynamic axes considering. Args: inputs_or_outputs: The mapping to fill. 
direction: either "inputs" or "outputs", it specifies whether input_or_outputs is the input mapping or the output mapping, this is important for axes naming. inverted_values_shape: If `True`, store values on dynamic axis 1, else on axis 2. """ if direction not in ["inputs", "outputs"]: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') name = "past_key_values" if direction == "inputs" else "present" for i in range(self.num_layers): inputs_or_outputs[f"{name}.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} if inverted_values_shape: inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 1: "past_sequence + sequence"} else: inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} def _flatten_past_key_values_(self, flattened_output, name, idx, t): flattened_output[f"{name}.{idx}.key"] = t[0] flattened_output[f"{name}.{idx}.value"] = t[1] def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> Dict[str, Any]: flattened_output = {} if name in ["present", "past_key_values"]: for idx, t in enumerate(field): self._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super().flatten_output_collection_property(name, field) return flattened_output
class_definition
18,853
24,825
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/onnx/config.py
null
330
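An illustrative sketch of the with_past flow, assuming the GPT2OnnxConfig subclass is exposed under transformers.models.gpt2 in this version; a PyTorch install and a checkpoint download are required.

from transformers import AutoConfig, AutoTokenizer, TensorType
from transformers.models.gpt2 import GPT2OnnxConfig  # an OnnxConfigWithPast subclass, assumed available

config = AutoConfig.from_pretrained("gpt2")
onnx_config = GPT2OnnxConfig.with_past(config, task="causal-lm")   # sets use_past=True

print(onnx_config.values_override)   # {"use_cache": True}, so the export keeps the cache enabled
print([name for name in onnx_config.outputs if name.startswith("present")][:2])

tokenizer = AutoTokenizer.from_pretrained("gpt2")
dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
# One (key, value) pair of zero tensors per layer, and an attention_mask widened to cover the fake past.
print(dummy["past_key_values"][0][0].shape)
print(dummy["attention_mask"].shape)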
class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast): @property def outputs(self) -> Mapping[str, Mapping[int, str]]: common_outputs = super(OnnxConfigWithPast, self).outputs # Renaming the outputs axes properly. for name, axes_names in common_outputs.items(): sequence_name = "encoder_sequence" if "encoder" in name else "decoder_sequence" for axis_idx, name in axes_names.items(): if "sequence" in name: axes_names[axis_idx] = sequence_name # We reset the value as the order in common_outputs (OrderedDict) is lost otherwise else: axes_names[axis_idx] = name if self.use_past: self.fill_with_past_key_values_(common_outputs, direction="outputs") return common_outputs @property def num_layers(self) -> Tuple[int]: try: num_layers = super().num_layers num_layers = (num_layers, num_layers) except AttributeError: if hasattr(self._config, "encoder_layers") and hasattr(self._config, "decoder_layers"): num_layers = (self._config.encoder_layers, self._config.decoder_layers) else: raise AttributeError( "could not find the number of encoder and decoder layers attributes in the model configuration," " override the num_layers property of the model OnnxConfig to solve this" ) return num_layers @property def num_attention_heads(self) -> Tuple[int]: try: num_attention_heads = super().num_attention_heads num_attention_heads = (num_attention_heads, num_attention_heads) except AttributeError: if hasattr(self._config, "encoder_attention_heads") and hasattr(self._config, "decoder_attention_heads"): num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads) else: raise AttributeError( "could not find the number of attention heads for the encoder and the decoder attributes in the" " model configuration, override the num_attention_heads property of the model OnnxConfig to solve" " this" ) return num_attention_heads def generate_dummy_inputs( self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) # Generate decoder inputs decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=decoder_seq_length, is_pair=is_pair, framework=framework ) decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch = common_inputs["input_ids"].shape[0] encoder_seq_length = common_inputs["input_ids"].shape[1] decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads encoder_shape = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) decoder_shape = ( batch, num_decoder_attention_heads, # Not using the same length for past_key_values decoder_seq_length + 3, self._config.hidden_size // num_decoder_attention_heads, ) common_inputs["past_key_values"] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered num_encoder_layers, num_decoder_layers = self.num_layers 
min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(min_num_layers): # For encoder-decoder models, past_key_values contains pre-computed values for both the encoder and the # decoder layers, hence a tuple of 4 tensors instead of 2 common_inputs["past_key_values"].append( ( torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape), ) ) # TODO: test this. shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str): if direction not in ["inputs", "outputs"]: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') name = "past_key_values" if direction == "inputs" else "present" # If the number of encoder and decoder layers are present in the model configuration, both are considered num_encoder_layers, num_decoder_layers = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" encoder_sequence = "past_encoder_sequence" decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence" for i in range(min_num_layers): inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence} inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence} for i in range(min_num_layers, max_num_layers): if remaining_side_name == "encoder": axes_info = {0: "batch", 2: encoder_sequence} else: axes_info = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.{remaining_side_name}.key"] = axes_info def _flatten_past_key_values_(self, flattened_output, name, idx, t): flattened_output[f"{name}.{idx}.decoder.key"] = t[0] flattened_output[f"{name}.{idx}.decoder.value"] = t[1] flattened_output[f"{name}.{idx}.encoder.key"] = t[2] flattened_output[f"{name}.{idx}.encoder.value"] = t[3]
class_definition
24,828
32,555
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/onnx/config.py
null
331
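An illustrative sketch of the seq2seq variant, assuming the T5OnnxConfig subclass is exposed under transformers.models.t5 in this version; exact dummy-input key names can differ across releases, and a PyTorch install plus a checkpoint download are required.

from transformers import AutoConfig, AutoTokenizer, TensorType
from transformers.models.t5 import T5OnnxConfig  # an OnnxSeq2SeqConfigWithPast subclass, assumed available

config = AutoConfig.from_pretrained("t5-small")
onnx_config = T5OnnxConfig.with_past(config, task="seq2seq-lm")

# Output axes are renamed to encoder_sequence / decoder_sequence, and present.{i}.decoder.* as well as
# present.{i}.encoder.* entries are appended by fill_with_past_key_values_.
print(list(onnx_config.outputs)[:5])

tokenizer = AutoTokenizer.from_pretrained("t5-small")
dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print(dummy.keys())                      # encoder inputs, decoder_* inputs and past_key_values
print(len(dummy["past_key_values"][0]))  # 4 tensors per layer: decoder key/value and encoder key/value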
class FeaturesManager: _TASKS_TO_AUTOMODELS = {} _TASKS_TO_TF_AUTOMODELS = {} if is_torch_available(): _TASKS_TO_AUTOMODELS = { "default": AutoModel, "masked-lm": AutoModelForMaskedLM, "causal-lm": AutoModelForCausalLM, "seq2seq-lm": AutoModelForSeq2SeqLM, "sequence-classification": AutoModelForSequenceClassification, "token-classification": AutoModelForTokenClassification, "multiple-choice": AutoModelForMultipleChoice, "object-detection": AutoModelForObjectDetection, "question-answering": AutoModelForQuestionAnswering, "image-classification": AutoModelForImageClassification, "image-segmentation": AutoModelForImageSegmentation, "masked-im": AutoModelForMaskedImageModeling, "semantic-segmentation": AutoModelForSemanticSegmentation, "vision2seq-lm": AutoModelForVision2Seq, "speech2seq-lm": AutoModelForSpeechSeq2Seq, } if is_tf_available(): _TASKS_TO_TF_AUTOMODELS = { "default": TFAutoModel, "masked-lm": TFAutoModelForMaskedLM, "causal-lm": TFAutoModelForCausalLM, "seq2seq-lm": TFAutoModelForSeq2SeqLM, "sequence-classification": TFAutoModelForSequenceClassification, "token-classification": TFAutoModelForTokenClassification, "multiple-choice": TFAutoModelForMultipleChoice, "question-answering": TFAutoModelForQuestionAnswering, "semantic-segmentation": TFAutoModelForSemanticSegmentation, } # Set of model topologies we support associated to the features supported by each topology and the factory _SUPPORTED_MODEL_TYPE = { "albert": supported_features_mapping( "default", "masked-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.albert.AlbertOnnxConfig", ), "bart": supported_features_mapping( "default", "default-with-past", "causal-lm", "causal-lm-with-past", "seq2seq-lm", "seq2seq-lm-with-past", "sequence-classification", "question-answering", onnx_config_cls="models.bart.BartOnnxConfig", ), # BEiT cannot be used with the masked image modeling autoclass, so this feature is excluded here "beit": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.beit.BeitOnnxConfig" ), "bert": supported_features_mapping( "default", "masked-lm", "causal-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.bert.BertOnnxConfig", ), "big-bird": supported_features_mapping( "default", "masked-lm", "causal-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.big_bird.BigBirdOnnxConfig", ), "bigbird-pegasus": supported_features_mapping( "default", "default-with-past", "causal-lm", "causal-lm-with-past", "seq2seq-lm", "seq2seq-lm-with-past", "sequence-classification", "question-answering", onnx_config_cls="models.bigbird_pegasus.BigBirdPegasusOnnxConfig", ), "blenderbot": supported_features_mapping( "default", "default-with-past", "causal-lm", "causal-lm-with-past", "seq2seq-lm", "seq2seq-lm-with-past", onnx_config_cls="models.blenderbot.BlenderbotOnnxConfig", ), "blenderbot-small": supported_features_mapping( "default", "default-with-past", "causal-lm", "causal-lm-with-past", "seq2seq-lm", "seq2seq-lm-with-past", onnx_config_cls="models.blenderbot_small.BlenderbotSmallOnnxConfig", ), "bloom": supported_features_mapping( "default", "default-with-past", "causal-lm", "causal-lm-with-past", "sequence-classification", "token-classification", onnx_config_cls="models.bloom.BloomOnnxConfig", ), "camembert": supported_features_mapping( "default", "masked-lm", "causal-lm", 
"sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.camembert.CamembertOnnxConfig", ), "clip": supported_features_mapping( "default", onnx_config_cls="models.clip.CLIPOnnxConfig", ), "codegen": supported_features_mapping( "default", "causal-lm", onnx_config_cls="models.codegen.CodeGenOnnxConfig", ), "convbert": supported_features_mapping( "default", "masked-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.convbert.ConvBertOnnxConfig", ), "convnext": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.convnext.ConvNextOnnxConfig", ), "data2vec-text": supported_features_mapping( "default", "masked-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.data2vec.Data2VecTextOnnxConfig", ), "data2vec-vision": supported_features_mapping( "default", "image-classification", # ONNX doesn't support `adaptive_avg_pool2d` yet # "semantic-segmentation", onnx_config_cls="models.data2vec.Data2VecVisionOnnxConfig", ), "deberta": supported_features_mapping( "default", "masked-lm", "sequence-classification", "token-classification", "question-answering", onnx_config_cls="models.deberta.DebertaOnnxConfig", ), "deberta-v2": supported_features_mapping( "default", "masked-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.deberta_v2.DebertaV2OnnxConfig", ), "deit": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.deit.DeiTOnnxConfig" ), "detr": supported_features_mapping( "default", "object-detection", "image-segmentation", onnx_config_cls="models.detr.DetrOnnxConfig", ), "distilbert": supported_features_mapping( "default", "masked-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.distilbert.DistilBertOnnxConfig", ), "electra": supported_features_mapping( "default", "masked-lm", "causal-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.electra.ElectraOnnxConfig", ), "flaubert": supported_features_mapping( "default", "masked-lm", "causal-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.flaubert.FlaubertOnnxConfig", ), "gpt2": supported_features_mapping( "default", "default-with-past", "causal-lm", "causal-lm-with-past", "sequence-classification", "token-classification", onnx_config_cls="models.gpt2.GPT2OnnxConfig", ), "gptj": supported_features_mapping( "default", "default-with-past", "causal-lm", "causal-lm-with-past", "question-answering", "sequence-classification", onnx_config_cls="models.gptj.GPTJOnnxConfig", ), "gpt-neo": supported_features_mapping( "default", "default-with-past", "causal-lm", "causal-lm-with-past", "sequence-classification", onnx_config_cls="models.gpt_neo.GPTNeoOnnxConfig", ), "groupvit": supported_features_mapping( "default", onnx_config_cls="models.groupvit.GroupViTOnnxConfig", ), "ibert": supported_features_mapping( "default", "masked-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.ibert.IBertOnnxConfig", ), "imagegpt": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.imagegpt.ImageGPTOnnxConfig" ), "layoutlm": 
supported_features_mapping( "default", "masked-lm", "sequence-classification", "token-classification", onnx_config_cls="models.layoutlm.LayoutLMOnnxConfig", ), "layoutlmv3": supported_features_mapping( "default", "question-answering", "sequence-classification", "token-classification", onnx_config_cls="models.layoutlmv3.LayoutLMv3OnnxConfig", ), "levit": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.levit.LevitOnnxConfig" ), "longt5": supported_features_mapping( "default", "default-with-past", "seq2seq-lm", "seq2seq-lm-with-past", onnx_config_cls="models.longt5.LongT5OnnxConfig", ), "longformer": supported_features_mapping( "default", "masked-lm", "multiple-choice", "question-answering", "sequence-classification", "token-classification", onnx_config_cls="models.longformer.LongformerOnnxConfig", ), "marian": supported_features_mapping( "default", "default-with-past", "seq2seq-lm", "seq2seq-lm-with-past", "causal-lm", "causal-lm-with-past", onnx_config_cls="models.marian.MarianOnnxConfig", ), "mbart": supported_features_mapping( "default", "default-with-past", "causal-lm", "causal-lm-with-past", "seq2seq-lm", "seq2seq-lm-with-past", "sequence-classification", "question-answering", onnx_config_cls="models.mbart.MBartOnnxConfig", ), "mobilebert": supported_features_mapping( "default", "masked-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.mobilebert.MobileBertOnnxConfig", ), "mobilenet-v1": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.mobilenet_v1.MobileNetV1OnnxConfig", ), "mobilenet-v2": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.mobilenet_v2.MobileNetV2OnnxConfig", ), "mobilevit": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.mobilevit.MobileViTOnnxConfig", ), "mt5": supported_features_mapping( "default", "default-with-past", "seq2seq-lm", "seq2seq-lm-with-past", onnx_config_cls="models.mt5.MT5OnnxConfig", ), "m2m-100": supported_features_mapping( "default", "default-with-past", "seq2seq-lm", "seq2seq-lm-with-past", onnx_config_cls="models.m2m_100.M2M100OnnxConfig", ), "owlvit": supported_features_mapping( "default", onnx_config_cls="models.owlvit.OwlViTOnnxConfig", ), "perceiver": supported_features_mapping( "image-classification", "masked-lm", "sequence-classification", onnx_config_cls="models.perceiver.PerceiverOnnxConfig", ), "poolformer": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.poolformer.PoolFormerOnnxConfig" ), "rembert": supported_features_mapping( "default", "masked-lm", "causal-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.rembert.RemBertOnnxConfig", ), "resnet": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.resnet.ResNetOnnxConfig", ), "roberta": supported_features_mapping( "default", "masked-lm", "causal-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.roberta.RobertaOnnxConfig", ), "roformer": supported_features_mapping( "default", "masked-lm", "causal-lm", "sequence-classification", "token-classification", "multiple-choice", "question-answering", "token-classification", onnx_config_cls="models.roformer.RoFormerOnnxConfig", ), "segformer": supported_features_mapping( "default", "image-classification", 
"semantic-segmentation", onnx_config_cls="models.segformer.SegformerOnnxConfig", ), "squeezebert": supported_features_mapping( "default", "masked-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.squeezebert.SqueezeBertOnnxConfig", ), "swin": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.swin.SwinOnnxConfig" ), "t5": supported_features_mapping( "default", "default-with-past", "seq2seq-lm", "seq2seq-lm-with-past", onnx_config_cls="models.t5.T5OnnxConfig", ), "vision-encoder-decoder": supported_features_mapping( "vision2seq-lm", onnx_config_cls="models.vision_encoder_decoder.VisionEncoderDecoderOnnxConfig" ), "vit": supported_features_mapping( "default", "image-classification", onnx_config_cls="models.vit.ViTOnnxConfig" ), "whisper": supported_features_mapping( "default", "default-with-past", "speech2seq-lm", "speech2seq-lm-with-past", onnx_config_cls="models.whisper.WhisperOnnxConfig", ), "xlm": supported_features_mapping( "default", "masked-lm", "causal-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.xlm.XLMOnnxConfig", ), "xlm-roberta": supported_features_mapping( "default", "masked-lm", "causal-lm", "sequence-classification", "multiple-choice", "token-classification", "question-answering", onnx_config_cls="models.xlm_roberta.XLMRobertaOnnxConfig", ), "yolos": supported_features_mapping( "default", "object-detection", onnx_config_cls="models.yolos.YolosOnnxConfig", ), } AVAILABLE_FEATURES = sorted(reduce(lambda s1, s2: s1 | s2, (v.keys() for v in _SUPPORTED_MODEL_TYPE.values()))) @staticmethod def get_supported_features_for_model_type( model_type: str, model_name: Optional[str] = None ) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]: """ Tries to retrieve the feature -> OnnxConfig constructor map from the model type. Args: model_type (`str`): The model type to retrieve the supported features for. model_name (`str`, *optional*): The name attribute of the model object, only used for the exception message. Returns: The dictionary mapping each feature to a corresponding OnnxConfig constructor. """ model_type = model_type.lower() if model_type not in FeaturesManager._SUPPORTED_MODEL_TYPE: model_type_and_model_name = f"{model_type} ({model_name})" if model_name else model_type raise KeyError( f"{model_type_and_model_name} is not supported yet. " f"Only {list(FeaturesManager._SUPPORTED_MODEL_TYPE.keys())} are supported. " f"If you want to support {model_type} please propose a PR or open up an issue." ) return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type] @staticmethod def feature_to_task(feature: str) -> str: return feature.replace("-with-past", "") @staticmethod def _validate_framework_choice(framework: str): """ Validates if the framework requested for the export is both correct and available, otherwise throws an exception. """ if framework not in ["pt", "tf"]: raise ValueError( f"Only two frameworks are supported for ONNX export: pt or tf, but {framework} was provided." 
) elif framework == "pt" and not is_torch_available(): raise RuntimeError("Cannot export model to ONNX using PyTorch because no PyTorch package was found.") elif framework == "tf" and not is_tf_available(): raise RuntimeError("Cannot export model to ONNX using TensorFlow because no TensorFlow package was found.") @staticmethod def get_model_class_for_feature(feature: str, framework: str = "pt") -> Type: """ Attempts to retrieve an AutoModel class from a feature name. Args: feature (`str`): The feature required. framework (`str`, *optional*, defaults to `"pt"`): The framework to use for the export. Returns: The AutoModel class corresponding to the feature. """ task = FeaturesManager.feature_to_task(feature) FeaturesManager._validate_framework_choice(framework) if framework == "pt": task_to_automodel = FeaturesManager._TASKS_TO_AUTOMODELS else: task_to_automodel = FeaturesManager._TASKS_TO_TF_AUTOMODELS if task not in task_to_automodel: raise KeyError( f"Unknown task: {feature}. Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}" ) return task_to_automodel[task] @staticmethod def determine_framework(model: str, framework: str = None) -> str: """ Determines the framework to use for the export. The priority is in the following order: 1. User input via `framework`. 2. If local checkpoint is provided, use the same framework as the checkpoint. 3. Available framework in environment, with priority given to PyTorch Args: model (`str`): The name of the model to export. framework (`str`, *optional*, defaults to `None`): The framework to use for the export. See above for priority if none provided. Returns: The framework to use for the export. """ if framework is not None: return framework framework_map = {"pt": "PyTorch", "tf": "TensorFlow"} exporter_map = {"pt": "torch", "tf": "tf2onnx"} if os.path.isdir(model): if os.path.isfile(os.path.join(model, WEIGHTS_NAME)): framework = "pt" elif os.path.isfile(os.path.join(model, TF2_WEIGHTS_NAME)): framework = "tf" else: raise FileNotFoundError( "Cannot determine framework from given checkpoint location." f" There should be a {WEIGHTS_NAME} for PyTorch" f" or {TF2_WEIGHTS_NAME} for TensorFlow." ) logger.info(f"Local {framework_map[framework]} model found.") else: if is_torch_available(): framework = "pt" elif is_tf_available(): framework = "tf" else: raise EnvironmentError("Neither PyTorch nor TensorFlow found in environment. Cannot export to ONNX.") logger.info(f"Framework not requested. Using {exporter_map[framework]} to export to ONNX.") return framework @staticmethod def get_model_from_feature( feature: str, model: str, framework: str = None, cache_dir: str = None ) -> Union["PreTrainedModel", "TFPreTrainedModel"]: """ Attempts to retrieve a model from a model's name and the feature to be enabled. Args: feature (`str`): The feature required. model (`str`): The name of the model to export. framework (`str`, *optional*, defaults to `None`): The framework to use for the export. See `FeaturesManager.determine_framework` for the priority should none be provided. Returns: The instance of the model. 
""" framework = FeaturesManager.determine_framework(model, framework) model_class = FeaturesManager.get_model_class_for_feature(feature, framework) try: model = model_class.from_pretrained(model, cache_dir=cache_dir) except OSError: if framework == "pt": logger.info("Loading TensorFlow model in PyTorch before exporting to ONNX.") model = model_class.from_pretrained(model, from_tf=True, cache_dir=cache_dir) else: logger.info("Loading PyTorch model in TensorFlow before exporting to ONNX.") model = model_class.from_pretrained(model, from_pt=True, cache_dir=cache_dir) return model @staticmethod def check_supported_model_or_raise( model: Union["PreTrainedModel", "TFPreTrainedModel"], feature: str = "default" ) -> Tuple[str, Callable]: """ Check whether or not the model has the requested features. Args: model: The model to export. feature: The name of the feature to check if it is available. Returns: (str) The type of the model (OnnxConfig) The OnnxConfig instance holding the model export properties. """ model_type = model.config.model_type.replace("_", "-") model_name = getattr(model, "name", "") model_features = FeaturesManager.get_supported_features_for_model_type(model_type, model_name=model_name) if feature not in model_features: raise ValueError( f"{model.config.model_type} doesn't support feature {feature}. Supported values are: {model_features}" ) return model.config.model_type, FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature] def get_config(model_type: str, feature: str) -> OnnxConfig: """ Gets the OnnxConfig for a model_type and feature combination. Args: model_type (`str`): The model type to retrieve the config for. feature (`str`): The feature to retrieve the config for. Returns: `OnnxConfig`: config for the combination """ return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature]
class_definition
2,811
28,263
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/onnx/features.py
null
332
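Usage sketch (not part of the source file): the mapping above is what FeaturesManager consults when resolving an ONNX export config. The model id and feature below are illustrative, and `transformers.onnx` is the legacy export path, so availability depends on the installed version.

from transformers import AutoModelForSequenceClassification
from transformers.onnx.features import FeaturesManager

# Features registered for a given model type (keys of the mapping above).
print(sorted(FeaturesManager.get_supported_features_for_model_type("distilbert").keys()))

# Resolve the OnnxConfig constructor for a concrete model + feature pair.
model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
model_kind, config_factory = FeaturesManager.check_supported_model_or_raise(
    model, feature="sequence-classification"
)
onnx_config = config_factory(model.config)  # e.g. a DistilBertOnnxConfig instance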
class ParameterFormat(Enum):
    Float = c_float

    @property
    def size(self) -> int:
        """
        Number of bytes required for this data type

        Returns:
            Integer > 0
        """
        return sizeof(self.value)
class_definition
822
1,063
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/onnx/utils.py
null
333
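A minimal illustration of what the enum above is used for, assuming the import path from the file shown; the parameter count is a made-up example.

from transformers.onnx.utils import ParameterFormat

bytes_per_param = ParameterFormat.Float.size  # sizeof(c_float) == 4
num_parameters = 110_000_000                  # hypothetical model size
print(f"~{num_parameters * bytes_per_param / 1e6:.0f} MB of float32 weights")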
class ImageLoss(nn.Module): """ This class computes the losses for DetrForObjectDetection/DetrForSegmentation. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). A note on the `num_classes` argument (copied from original repo in detr.py): "the naming of the `num_classes` parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id` is the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass `num_classes` to be 91. As another example, for a dataset that has a single class with `id` 1, you should pass `num_classes` to be 2 (`max_obj_id` + 1). For more details on this, check the following discussion https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223" Args: matcher (`DetrHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. eos_coef (`float`): Relative classification weight applied to the no-object category. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. """ def __init__(self, matcher, num_classes, eos_coef, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.eos_coef = eos_coef self.losses = losses empty_weight = torch.ones(self.num_classes + 1) empty_weight[-1] = self.eos_coef self.register_buffer("empty_weight", empty_weight) # removed logging parameter, which was part of the original implementation def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (NLL) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o loss_ce = nn.functional.cross_entropy(source_logits.transpose(1, 2), target_classes, self.empty_weight) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
""" if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. """ if "pred_masks" not in outputs: raise KeyError("No predicted masks found in outputs") source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs["pred_masks"] source_masks = source_masks[source_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] # upsample predictions to the target size source_masks = nn.functional.interpolate( source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes), "loss_dice": dice_loss(source_masks, target_masks, num_boxes), } return losses def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, "masks": self.loss_masks, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. 
""" outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) world_size = 1 if is_accelerate_available(): if PartialState._shared_state != {}: num_boxes = reduce(num_boxes) world_size = PartialState().num_processes num_boxes = torch.clamp(num_boxes / world_size, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) return losses
class_definition
3,155
12,632
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
null
334
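A hedged, self-contained sketch of wiring ImageLoss to the HungarianMatcher defined later in the same file; all shapes, cost weights and labels below are dummy values, and the import path follows the filepath shown above.

import torch
from transformers.loss.loss_for_object_detection import HungarianMatcher, ImageLoss

matcher = HungarianMatcher(class_cost=1, bbox_cost=5, giou_cost=2)
criterion = ImageLoss(matcher=matcher, num_classes=91, eos_coef=0.1,
                      losses=["labels", "boxes", "cardinality"])

outputs = {
    "logits": torch.randn(2, 100, 92),    # [batch, num_queries, num_classes + 1]
    "pred_boxes": torch.rand(2, 100, 4),  # (cx, cy, w, h), normalized to [0, 1]
}
targets = [
    {"class_labels": torch.tensor([17, 56]), "boxes": torch.rand(2, 4)},
    {"class_labels": torch.tensor([3]), "boxes": torch.rand(1, 4)},
]
loss_dict = criterion(outputs, targets)  # keys: loss_ce, loss_bbox, loss_giou, cardinality_error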
class HungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. """ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be ommitted. class_cost = -out_prob[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class_definition
12,719
16,676
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
null
335
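The last lines of forward() reduce the matching to a classic assignment problem. The toy example below uses a dummy cost matrix to show the scipy call that produces the (prediction, target) index pairs.

import torch
from scipy.optimize import linear_sum_assignment

num_queries, num_targets = 5, 3
cost_matrix = torch.rand(num_queries, num_targets)         # lower cost = better match
pred_idx, target_idx = linear_sum_assignment(cost_matrix.numpy())
print(list(zip(pred_idx.tolist(), target_idx.tolist())))   # 3 matched pairs, 2 queries unmatched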
class NestedTensor:
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    def decompose(self):
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
class_definition
19,545
20,062
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_for_object_detection.py
null
336
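A small sketch of how a NestedTensor is typically produced and consumed; nested_tensor_from_tensor_list is the helper from the same file that pads a list of differently sized images and records the padded area in a boolean mask (availability depends on your transformers version).

import torch
from transformers.loss.loss_for_object_detection import nested_tensor_from_tensor_list

images = [torch.rand(3, 480, 640), torch.rand(3, 400, 500)]  # two images, different sizes
nested = nested_tensor_from_tensor_list(images)

tensors, mask = nested.decompose()
print(tensors.shape)  # [2, 3, 480, 640]: padded to the largest image in the batch
print(mask.shape)     # [2, 480, 640]: True where padding was added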
class DeformableDetrHungarianMatcher(HungarianMatcher):
    @torch.no_grad()
    def forward(self, outputs, targets):
        """
        Differences:
        - out_prob = outputs["logits"].flatten(0, 1).sigmoid() instead of softmax
        - class_cost uses alpha and gamma
        """
        batch_size, num_queries = outputs["logits"].shape[:2]

        # We flatten to compute the cost matrices in a batch
        out_prob = outputs["logits"].flatten(0, 1).sigmoid()  # [batch_size * num_queries, num_classes]
        out_bbox = outputs["pred_boxes"].flatten(0, 1)  # [batch_size * num_queries, 4]

        # Also concat the target labels and boxes
        target_ids = torch.cat([v["class_labels"] for v in targets])
        target_bbox = torch.cat([v["boxes"] for v in targets])

        # Compute the classification cost.
        alpha = 0.25
        gamma = 2.0
        neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())
        pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
        class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids]

        # Compute the L1 cost between boxes
        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)

        # Compute the giou cost between boxes
        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))

        # Final cost matrix
        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()

        sizes = [len(v["boxes"]) for v in targets]
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]

        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class_definition
361
2,240
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_deformable_detr.py
null
337
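The only change relative to the parent class is the sigmoid/focal-style classification cost; the standalone snippet below reproduces that computation on dummy logits.

import torch

alpha, gamma = 0.25, 2.0
out_prob = torch.sigmoid(torch.randn(6, 91))   # [total_queries, num_classes]
target_ids = torch.tensor([3, 17])             # classes of the ground-truth boxes

neg_cost = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())
pos_cost = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
class_cost = pos_cost[:, target_ids] - neg_cost[:, target_ids]
print(class_cost.shape)  # [6, 2]: one focal-style cost per (prediction, target) pair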
class DeformableDetrImageLoss(ImageLoss):
    def __init__(self, matcher, num_classes, focal_alpha, losses):
        nn.Module.__init__(self)
        self.matcher = matcher
        self.num_classes = num_classes
        self.focal_alpha = focal_alpha
        self.losses = losses

    # removed logging parameter, which was part of the original implementation
    def loss_labels(self, outputs, targets, indices, num_boxes):
        """
        Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor
        of dim [nb_target_boxes]
        """
        if "logits" not in outputs:
            raise KeyError("No logits were found in the outputs")
        source_logits = outputs["logits"]

        idx = self._get_source_permutation_idx(indices)
        target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
        target_classes = torch.full(
            source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device
        )
        target_classes[idx] = target_classes_o

        target_classes_onehot = torch.zeros(
            [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1],
            dtype=source_logits.dtype,
            layout=source_logits.layout,
            device=source_logits.device,
        )
        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)

        target_classes_onehot = target_classes_onehot[:, :, :-1]
        loss_ce = (
            sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2)
            * source_logits.shape[1]
        )
        losses = {"loss_ce": loss_ce}

        return losses
class_definition
2,243
3,988
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_deformable_detr.py
null
338
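The loss_labels override builds a one-hot target with an extra "no object" column that exists only so scatter_ has a valid index; it is sliced away before the focal loss. A toy version with dummy sizes:

import torch

num_classes = 5
target_classes = torch.tensor([[1, 4, num_classes]])  # last query matched to "no object"
onehot = torch.zeros(1, 3, num_classes + 1)
onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
onehot = onehot[:, :, :-1]                            # drop the no-object column
print(onehot[0])                                      # third row is all zeros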
class RTDetrHungarianMatcher(nn.Module): """This class computes an assignment between the targets and the predictions of the network For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: config: RTDetrConfig """ def __init__(self, config): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = config.matcher_class_cost self.bbox_cost = config.matcher_bbox_cost self.giou_cost = config.matcher_giou_cost self.use_focal_loss = config.use_focal_loss self.alpha = config.matcher_alpha self.gamma = config.matcher_gamma if self.class_cost == self.bbox_cost == self.giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """Performs the matching Params: outputs: This is a dict that contains at least these entries: "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates Returns: A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be ommitted. 
if self.use_focal_loss: out_prob = F.sigmoid(outputs["logits"].flatten(0, 1)) out_prob = out_prob[:, target_ids] neg_cost_class = (1 - self.alpha) * (out_prob**self.gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = self.alpha * ((1 - out_prob) ** self.gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class - neg_cost_class else: out_prob = outputs["logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] class_cost = -out_prob[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost betwen boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Compute the final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class_definition
1,110
5,300
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
null
339
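Unlike the DETR matcher, this one is configured from the model config. A hedged construction sketch: the attribute names mirror the __init__ above, the values are arbitrary, and the import path follows the filepath shown.

from transformers import RTDetrConfig
from transformers.loss.loss_rt_detr import RTDetrHungarianMatcher

config = RTDetrConfig(
    matcher_class_cost=2.0,
    matcher_bbox_cost=5.0,
    matcher_giou_cost=2.0,
    use_focal_loss=True,
    matcher_alpha=0.25,
    matcher_gamma=2.0,
)
matcher = RTDetrHungarianMatcher(config)
print(matcher.class_cost, matcher.use_focal_loss)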
class RTDetrLoss(nn.Module): """ This class computes the losses for RTDetr. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`DetrHungarianMatcher`): Module able to compute a matching between targets and proposals. weight_dict (`Dict`): Dictionary relating each loss with its weights. These losses are configured in RTDetrConf as `weight_loss_vfl`, `weight_loss_bbox`, `weight_loss_giou` losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. alpha (`float`): Parameter alpha used to compute the focal loss. gamma (`float`): Parameter gamma used to compute the focal loss. eos_coef (`float`): Relative classification weight applied to the no-object category. num_classes (`int`): Number of object categories, omitting the special no-object category. """ def __init__(self, config): super().__init__() self.matcher = RTDetrHungarianMatcher(config) self.num_classes = config.num_labels self.weight_dict = { "loss_vfl": config.weight_loss_vfl, "loss_bbox": config.weight_loss_bbox, "loss_giou": config.weight_loss_giou, } self.losses = ["vfl", "boxes"] self.eos_coef = config.eos_coefficient empty_weight = torch.ones(config.num_labels + 1) empty_weight[-1] = self.eos_coef self.register_buffer("empty_weight", empty_weight) self.alpha = config.focal_loss_alpha self.gamma = config.focal_loss_gamma def loss_labels_vfl(self, outputs, targets, indices, num_boxes, log=True): if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") if "logits" not in outputs: raise KeyError("No predicted logits found in outputs") idx = self._get_source_permutation_idx(indices) src_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([_target["boxes"][i] for _target, (_, i) in zip(targets, indices)], dim=0) ious, _ = box_iou(center_to_corners_format(src_boxes), center_to_corners_format(target_boxes)) ious = torch.diag(ious).detach() src_logits = outputs["logits"] target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] target_score_original = torch.zeros_like(target_classes, dtype=src_logits.dtype) target_score_original[idx] = ious.to(target_score_original.dtype) target_score = target_score_original.unsqueeze(-1) * target pred_score = F.sigmoid(src_logits).detach() weight = self.alpha * pred_score.pow(self.gamma) * (1 - target) + target_score loss = F.binary_cross_entropy_with_logits(src_logits, target_score, weight=weight, reduction="none") loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {"loss_vfl": loss} def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (NLL) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") src_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, 
device=src_logits.device ) target_classes[idx] = target_classes_original loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.class_weight) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) src_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) losses = {} loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none") losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(src_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. 
""" if "pred_masks" not in outputs: raise KeyError("No predicted masks found in outputs") source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs["pred_masks"] source_masks = source_masks[source_idx] masks = [t["masks"] for t in targets] target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] # upsample predictions to the target size source_masks = nn.functional.interpolate( source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes), "loss_dice": dice_loss(source_masks, target_masks, num_boxes), } return losses def loss_labels_bce(self, outputs, targets, indices, num_boxes, log=True): src_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] loss = F.binary_cross_entropy_with_logits(src_logits, target * 1.0, reduction="none") loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {"loss_bce": loss} def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx def loss_labels_focal(self, outputs, targets, indices, num_boxes, log=True): if "logits" not in outputs: raise KeyError("No logits found in outputs") src_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)]) target_classes = torch.full( src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device ) target_classes[idx] = target_classes_original target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1] loss = sigmoid_focal_loss(src_logits, target, self.alpha, self.gamma) loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes return {"loss_focal": loss} def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, "masks": self.loss_masks, "bce": self.loss_labels_bce, "focal": self.loss_labels_focal, "vfl": self.loss_labels_vfl, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) @staticmethod def get_cdn_matched_indices(dn_meta, targets): dn_positive_idx, dn_num_group = dn_meta["dn_positive_idx"], dn_meta["dn_num_group"] num_gts = [len(t["class_labels"]) for t in targets] device = 
targets[0]["class_labels"].device dn_match_indices = [] for i, num_gt in enumerate(num_gts): if num_gt > 0: gt_idx = torch.arange(num_gt, dtype=torch.int64, device=device) gt_idx = gt_idx.tile(dn_num_group) assert len(dn_positive_idx[i]) == len(gt_idx) dn_match_indices.append((dn_positive_idx[i], gt_idx)) else: dn_match_indices.append( ( torch.zeros(0, dtype=torch.int64, device=device), torch.zeros(0, dtype=torch.int64, device=device), ) ) return dn_match_indices def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. """ outputs_without_aux = {k: v for k, v in outputs.items() if "auxiliary_outputs" not in k} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) num_boxes = torch.clamp(num_boxes, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: l_dict = self.get_loss(loss, outputs, targets, indices, num_boxes) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} losses.update(l_dict) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} l_dict = {k + f"_aux_{i}": v for k, v in l_dict.items()} losses.update(l_dict) # In case of cdn auxiliary losses. For rtdetr if "dn_auxiliary_outputs" in outputs: if "denoising_meta_values" not in outputs: raise ValueError( "The output must have the 'denoising_meta_values` key. Please, ensure that 'outputs' includes a 'denoising_meta_values' entry." ) indices = self.get_cdn_matched_indices(outputs["denoising_meta_values"], targets) num_boxes = num_boxes * outputs["denoising_meta_values"]["dn_num_group"] for i, auxiliary_outputs in enumerate(outputs["dn_auxiliary_outputs"]): # indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict} l_dict = {k + f"_dn_{i}": v for k, v in l_dict.items()} losses.update(l_dict) return losses
class_definition
5,303
20,263
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/loss/loss_rt_detr.py
null
340
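A hedged sketch of the varifocal weighting at the heart of loss_labels_vfl: matched (positive) positions are weighted by their IoU with the ground-truth box, unmatched positions by a focal down-weighting of confident negatives. Dummy numbers throughout.

import torch
import torch.nn.functional as F

alpha, gamma = 0.75, 2.0
logits = torch.randn(2, 4, 6)          # [batch, queries, classes]
target = torch.zeros_like(logits)
target[0, 1, 3] = 1.0                  # one matched (query, class) pair
target_score = target * 0.8            # 0.8 = IoU of that matched pair

pred_score = torch.sigmoid(logits).detach()
weight = alpha * pred_score.pow(gamma) * (1 - target) + target_score
loss = F.binary_cross_entropy_with_logits(logits, target_score, weight=weight, reduction="none")
print(loss.mean())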
class CompressedTensorsHfQuantizer(HfQuantizer): """ Quantizer for the compressed_tensors package. Loads and restores models to quantized state with compressed_tensors """ requires_calibration = True required_packages = ["compressed_tensors"] def __init__(self, quantization_config: CompressedTensorsConfig, **kwargs): super().__init__(quantization_config, **kwargs) if not is_compressed_tensors_available(): raise ImportError( "Using `compressed_tensors` quantized models requires the compressed-tensors library: " "`pip install compressed-tensors`" ) from compressed_tensors.compressors import ModelCompressor self.compressor = ModelCompressor.from_compression_config(quantization_config) self.run_compressed = quantization_config.run_compressed self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_compressed_tensors_available(): raise ImportError( "Using `compressed_tensors` quantized models requires the compressed-tensors library: " "`pip install compressed-tensors`" ) if not is_torch_available(): # torch already should be installed as part of compressed tensors raise ImportError("torch is required for using compressed-tensors quantization") def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: logger.info("Loading model using torch.float16 for compressed-tensors quantization") torch_dtype = torch.float16 elif torch_dtype != torch.float16: logger.info( "We suggest you to set `torch_dtype=torch.float16` for better efficiency with compressed_tensors." ) return torch_dtype def _process_model_before_weight_loading(self, model, **kwargs): from compressed_tensors.quantization import apply_quantization_config ct_quantization_config = self.compressor.quantization_config if self.run_compressed and self.is_quantization_compressed: apply_quantization_config(model, ct_quantization_config, run_compressed=True) elif not self.is_quantization_compressed: apply_quantization_config(model, ct_quantization_config) def _process_model_after_weight_loading(self, model, **kwargs): """Decompress loaded model if necessary - need for qat""" if (self.is_quantization_compressed and not self.run_compressed) or self.is_sparsification_compressed: config = kwargs.get("config", None) cache_path = config._name_or_path if not os.path.exists(cache_path): from transformers.utils import cached_file config_file_path = cached_file(cache_path, "config.json") cache_path = os.path.sep.join(config_file_path.split(os.path.sep)[:-1]) if self.is_quantization_compressed and not self.run_compressed: from compressed_tensors.quantization import QuantizationStatus self.compressor.quantization_config.quantization_status = QuantizationStatus.FROZEN self.compressor.decompress(model_path=cache_path, model=model) @property def is_quantization_compressed(self): from compressed_tensors.quantization import QuantizationStatus return ( self.quantization_config.quantization_config is not None and self.quantization_config.quantization_config.quantization_status == QuantizationStatus.COMPRESSED ) @property def is_sparsification_compressed(self): from compressed_tensors.config.base import CompressionFormat return ( self.quantization_config.sparsity_config is not None and self.quantization_config.sparsity_config.format != CompressionFormat.dense.value ) @property def is_trainable(self): return True def is_qat_trainable(self) -> bool: """Loaded Models can carry out quantization aware training""" # models need to be decompressed carry out qat return not 
self.run_compressed or not self.is_quantization_compressed def is_serializable(self, safe_serialization=None) -> bool: """Models quantized using compressed tensors can be saved to disk""" return True
class_definition
884
5,366
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_compressed_tensors.py
null
341
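The quantizer is never instantiated directly; from_pretrained selects it when a checkpoint ships a compressed-tensors quantization config. The model id below is hypothetical and used only to show the flow.

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "some-org/llama-3-8b-w8a8-compressed-tensors"  # hypothetical checkpoint
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)
print(model.config.quantization_config)  # reflects the compressed-tensors config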
class FbgemmFp8HfQuantizer(HfQuantizer): """ FP8 quantization using fbgemm kernels """ requires_parameters_quantization = True requires_calibration = False required_packages = ["fbgemm-gpu", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_torch_available() or version.parse(importlib.metadata.version("torch")) < version.parse("2.1.0"): raise ImportError( "Using fbgemm fp8 quantization requires torch > 2.1.0" "Please install the latest version of torch ( pip install --upgrade torch )" ) if not is_fbgemm_gpu_available(): raise ImportError( "Using fbgemm fp8 quantization requires fbgemm-gpu library" "Please install the latest version of fbgemm-gpu library by following : https://pytorch.org/FBGEMM/fbgemm_gpu-development/InstallationInstructions.html#fbgemm-gpu-install-libraries" ) if not is_accelerate_available("0.32.2"): raise ImportError( "Loading an FP8 quantized model requires accelerate > 0.32.1 (`pip install --upgrade accelerate`)" ) if not torch.cuda.is_available(): raise RuntimeError("Using FP8 quantized models with fbgemm kernels requires a GPU") compute_capability = torch.cuda.get_device_capability() major, minor = compute_capability if major < 9: raise ValueError( "FP8 quantized models is only supported on GPUs with compute capability >= 9.0 (e.g H100)" ) device_map = kwargs.get("device_map", None) if device_map is None: logger.warning_once( "You have loaded an FP8 model on CPU and have a CUDA device available, make sure to set " "your model on a GPU device in order to run your model. To remove this warning, pass device_map = 'cuda'. " ) elif device_map is not None: if ( not self.pre_quantized and isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()) ): raise ValueError( "You are attempting to load an FP8 model with a device_map that contains a CPU or disk device." "This is not supported when the model is quantized on the fly. " "Please use a quantized checkpoint or remove the CPU or disk device from the device_map." ) def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: torch_dtype = torch.bfloat16 logger.info( "Overriding torch_dtype=%s with `torch_dtype=torch.bloat16` due to " "requirements of `fbgemm-gpu` to enable model loading in fp8. " "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" " torch_dtype=torch.bfloat16 to remove this warning.", torch_dtype, ) elif torch_dtype == torch.float16: raise ValueError( "You cannot use FP8 with torch_dtype=torch.float16." 
"We recommend you passing torch_dtype=torch.bfloat16" ) return torch_dtype def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ): from ..integrations import FbgemmFp8Linear module, tensor_name = get_module_from_name(model, param_name) if isinstance(module, FbgemmFp8Linear): if self.pre_quantized or tensor_name == "bias": if tensor_name == "weight" and param_value.dtype != torch.float8_e4m3fn: raise ValueError("Expect quantized weights but got an unquantized weight") return False else: if tensor_name == "weight_scale": raise ValueError("Expect unquantized weights but got a quantized weight_scale") return True return False def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: Optional[List[str]] = None, ): """ Quantizes weights into weight and weight_scale """ new_value, weight_scale = torch.ops.fbgemm.quantize_fp8_per_row(param_value) module, tensor_name = get_module_from_name(model, param_name) module._buffers[tensor_name] = new_value.to(target_device) # to have the right output shape -> (out_features, 1) module._buffers["weight_scale"] = weight_scale.view(weight_scale.shape[0], 1).to(target_device) if unexpected_keys is not None and param_name in unexpected_keys: unexpected_keys.remove(param_name) del param_name def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): return model def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from ..integrations import get_keys_to_not_convert, replace_with_fbgemm_fp8_linear self.modules_to_not_convert = get_keys_to_not_convert(model) if self.quantization_config.modules_to_not_convert is not None: self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert) model = replace_with_fbgemm_fp8_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config, pre_quantized=self.pre_quantized, ) model.config.quantization_config = self.quantization_config def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]: from ..integrations import FbgemmFp8Linear not_missing_keys = [] for name, module in model.named_modules(): if isinstance(module, FbgemmFp8Linear): for missing in missing_keys: if ( (name in missing or name in f"{prefix}.{missing}") and not missing.endswith(".weight") and not missing.endswith(".bias") ): not_missing_keys.append(missing) return [k for k in missing_keys if k not in not_missing_keys] def is_serializable(self, safe_serialization=None): return True @property def is_trainable(self) -> bool: return False
class_definition
1,055
8,141
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py
null
342
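A hedged on-the-fly quantization sketch (requires a compute-capability >= 9.0 GPU plus fbgemm-gpu and accelerate installed). FbgemmFp8Config is the user-facing config that routes loading through this quantizer; the model id is illustrative.

import torch
from transformers import AutoModelForCausalLM, FbgemmFp8Config

quant_config = FbgemmFp8Config(modules_to_not_convert=["lm_head"])
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B",            # illustrative model id
    torch_dtype=torch.bfloat16,
    device_map="cuda",
    quantization_config=quant_config,
)
model.save_pretrained("llama-3-8b-fp8")      # stores fp8 weights plus per-row scales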
class AwqQuantizer(HfQuantizer): """ 4-bit quantization for Activation-aware Weight Quantization(AWQ) (https://arxiv.org/abs/2306.00978) """ # AWQ requires data callibration - we support only inference requires_calibration = True required_packages = ["awq", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) def validate_environment(self, device_map, **kwargs): if not is_auto_awq_available(): raise ImportError("Loading an AWQ quantized model requires auto-awq library (`pip install autoawq`)") if not is_accelerate_available(): raise ImportError("Loading an AWQ quantized model requires accelerate (`pip install accelerate`)") if self.quantization_config.version == AWQLinearVersion.GEMM and not torch.cuda.is_available(): logger.warning_once("No CUDA found, replace GEMM with IPEX version to support non-cuda AWQ model.") self.quantization_config.version = AWQLinearVersion.IPEX if self.quantization_config.version == AWQLinearVersion.IPEX: if version.parse(importlib.metadata.version("autoawq")) < version.parse("0.2.6"): raise RuntimeError( "To use IPEX backend, you need autoawq>0.6.2. Please install the latest version or from source." ) if device_map is None: logger.warning_once( "You have loaded an AWQ model without setting device_map, please set 'cpu' or 'xpu' or 'auto'" ) elif isinstance(device_map, dict) and "disk" in device_map.values(): raise ValueError( "You are attempting to load an IPEX version AWQ model with a device_map that contains disk device." " This is not supported. Please make sure only cpu and xpu in the device_map." ) else: if not torch.cuda.is_available(): raise RuntimeError( "GPU is required to run AWQ quantized model. You can use IPEX version AWQ if you have an Intel CPU" ) if device_map is None: logger.warning_once( "You have loaded an AWQ model on CPU and have a CUDA device available, make sure to set " "your model on a GPU device in order to run your model." ) elif device_map is not None: if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()): raise ValueError( "You are attempting to load an AWQ model with a device_map that contains a CPU or disk device." " This is not supported. Please remove the CPU or disk device from the device_map." ) def update_torch_dtype(self, torch_dtype): if torch_dtype is None: torch_dtype = torch.float16 logger.info("Loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually.") elif torch_dtype != torch.float16: logger.warning("We suggest you to set `torch_dtype=torch.float16` for better efficiency with AWQ.") return torch_dtype def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs): from ..integrations import get_keys_to_not_convert, replace_quantization_scales, replace_with_awq_linear self.modules_to_not_convert = get_keys_to_not_convert(model) if self.quantization_config.modules_to_not_convert is not None: self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert) model, has_been_replaced = replace_with_awq_linear( model, quantization_config=self.quantization_config, modules_to_not_convert=self.modules_to_not_convert ) model = replace_quantization_scales(model, model.config.model_type) if not has_been_replaced: logger.warning( "You are loading an AWQ model but no linear modules were found in your model." " Please double check your model architecture, or submit an issue on github if you think this is a bug." 
) def _process_model_after_weight_loading(self, model, **kwargs): if self.quantization_config.do_fuse: from ..integrations import fuse_awq_modules model = fuse_awq_modules(model, self.quantization_config) model._awq_is_fused = True # TODO: consider storing this flag in model.config instead if self.quantization_config.version == AWQLinearVersion.EXLLAMA: from ..integrations import post_init_awq_exllama_modules model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) if self.quantization_config.version == AWQLinearVersion.IPEX: from ..integrations import post_init_awq_ipex_modules model = post_init_awq_ipex_modules(model) def is_serializable(self, safe_serialization=None): # AWQ through auto-awq has been always serializable, except if the model is fused. if self.quantization_config.do_fuse: logger.warning("You cannot save an AWQ model that uses fused modules!") return False if self.quantization_config.version == AWQLinearVersion.EXLLAMA: logger.warning("You cannot save an AWQ model that uses Exllama backend!") return False return True @property def is_trainable(self): # AWQ supports PEFT fine-tuning from version 0.2.0 MIN_AWQ_VERSION_FOR_PEFT = "0.2.0" return version.parse(importlib.metadata.version("autoawq")) >= version.parse(MIN_AWQ_VERSION_FOR_PEFT)
class_definition
1,040
6,901
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_awq.py
null
343
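Since AWQ requires offline calibration (requires_calibration = True), the typical flow is simply loading a pre-quantized checkpoint; the quantizer validates the environment and swaps in AWQ linear layers. A hedged sketch; the community checkpoint name is used purely as an illustration.

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "TheBloke/Mistral-7B-Instruct-v0.2-AWQ"  # example community AWQ checkpoint
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda:0")
tokenizer = AutoTokenizer.from_pretrained(model_id)

inputs = tokenizer("AWQ keeps salient channels in higher precision because", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0], skip_special_tokens=True))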
class Bnb4BitHfQuantizer(HfQuantizer): """ 4-bit quantization from bitsandbytes.py quantization method: before loading: converts transformer layers into Linear4bit during loading: load 16bit weight and pass to the layer object after: quantizes individual weights in Linear4bit into 4bit at the first .cuda() call saving: from state dict, as usual; saves weights and `quant_state` components loading: need to locate `quant_state` components and pass to Param4bit constructor """ use_keep_in_fp32_modules = True requires_parameters_quantization = True requires_calibration = False required_packages = ["bitsandbytes", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) if self.quantization_config.llm_int8_skip_modules is not None: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules def validate_environment(self, *args, **kwargs): if not is_accelerate_available(): raise ImportError( f"Using `bitsandbytes` 4-bit quantization requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`" ) if not is_bitsandbytes_available(): raise ImportError( "Using `bitsandbytes` 4-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`" ) from ..integrations import validate_bnb_backend_availability from ..utils import is_bitsandbytes_multi_backend_available bnb_multibackend_is_enabled = is_bitsandbytes_multi_backend_available() validate_bnb_backend_availability(raise_exception=True) if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make" " sure the weights are in PyTorch format." ) device_map = kwargs.get("device_map", None) if ( device_map is not None and isinstance(device_map, dict) and not self.quantization_config.llm_int8_enable_fp32_cpu_offload ): device_map_without_lm_head = { key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert } if set(device_map.values()) == {"cpu"} and bnb_multibackend_is_enabled: pass elif "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): raise ValueError( "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the " "quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules " "in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to " "`from_pretrained`. Check " "https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu " "for more details. " ) if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.39.0"): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit inference and training" " make sure you have the latest version of `bitsandbytes` installed" ) def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"): from accelerate.utils import CustomDtype if target_dtype != torch.int8: logger.info("target_dtype {target_dtype} is replaced by `CustomDtype.INT4` for 4-bit BnB quantization") return CustomDtype.INT4 else: raise ValueError( "You are using `device_map='auto'` on a 4bit loaded version of the model. 
To automatically compute" " the appropriate device map, you should upgrade your `accelerate` library," "`pip install --upgrade accelerate` or install it from source to support fp4 auto device map" "calculation. You may encounter unexpected behavior, or pass your own device map" ) def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ) -> bool: import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Params4bit): # Add here check for loaded components' dtypes once serialization is implemented return True elif isinstance(module, bnb.nn.Linear4bit) and tensor_name == "bias": # bias could be loaded by regular set_module_tensor_to_device() from accelerate, # but it would wrongly use uninitialized weight there. return True else: return False def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: Optional[List[str]] = None, ): """ combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device() """ import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if tensor_name not in module._parameters: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") old_value = getattr(module, tensor_name) # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). if isinstance(target_device, int) and is_torch_npu_available(): target_device = f"npu:{target_device}" if tensor_name == "bias": if param_value is None: new_value = old_value.to(target_device) else: new_value = param_value.to(target_device) new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad) module._parameters[tensor_name] = new_value return if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit): raise ValueError("this function only loads `Linear4bit components`") if ( old_value.device == torch.device("meta") and target_device not in ["meta", torch.device("meta")] and param_value is None ): raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.") # construct `new_value` for the module._parameters[tensor_name]: if self.pre_quantized: # 4bit loading. Collecting components for restoring quantized weight # This can be expanded to make a universal call for any quantized weight loading if not self.is_serializable: raise ValueError( "Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) if (param_name + ".quant_state.bitsandbytes__fp4" not in state_dict) and ( param_name + ".quant_state.bitsandbytes__nf4" not in state_dict ): raise ValueError( f"Supplied state dict for {param_name} does not contain `bitsandbytes__*` and possibly other `quantized_stats` components." ) quantized_stats = {} for k, v in state_dict.items(): if param_name + "." 
in k: quantized_stats[k] = v if unexpected_keys is not None and k in unexpected_keys: unexpected_keys.remove(k) param_kwargs = {} if self.is_bnb_supports_quant_storage_module: param_kwargs["module"] = module new_value = bnb.nn.Params4bit.from_prequantized( data=param_value, quantized_stats=quantized_stats, requires_grad=False, device=target_device, **param_kwargs, ) else: new_value = param_value.to("cpu") # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls, Conv1D): new_value = new_value.T kwargs = old_value.__dict__ new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(target_device) module._parameters[tensor_name] = new_value # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.adjust_max_memory def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: # need more space for buffers that are created during quantization max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.update_torch_dtype def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` logger.info( "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to " "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. " "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" " torch_dtype=torch.float16 to remove this warning.", torch_dtype, ) torch_dtype = torch.float16 return torch_dtype def update_device_map(self, device_map): if device_map is None: if torch.cuda.is_available(): device_map = {"": torch.cuda.current_device()} elif is_torch_npu_available(): device_map = {"": f"npu:{torch.npu.current_device()}"} elif is_torch_xpu_available(): device_map = {"": f"xpu:{torch.xpu.current_device()}"} else: device_map = {"": "cpu"} logger.info( "The device_map was not initialized. " f"Setting device_map to {device_map}. 
" "If you want to use the model for inference, please set device_map ='auto' " ) return device_map # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_before_weight_loading def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear llm_int8_enable_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if self.quantization_config.llm_int8_skip_modules is None: self.modules_to_not_convert = get_keys_to_not_convert(model) else: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules if not isinstance(self.modules_to_not_convert, list): self.modules_to_not_convert = [self.modules_to_not_convert] self.modules_to_not_convert.extend(keep_in_fp32_modules) # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk` if isinstance(device_map, dict) and len(device_map.keys()) > 1: keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] if len(keys_on_cpu) > 0 and not llm_int8_enable_fp32_cpu_offload: raise ValueError( "If you want to offload some keys to `cpu` or `disk`, you need to set " "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be " " converted to 8-bit but kept in 32-bit." ) self.modules_to_not_convert.extend(keys_on_cpu) model = replace_with_bnb_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config ) # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbyter.py to here model.config.quantization_config = self.quantization_config # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_after_weight_loading with 8bit->4bit def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): model.is_loaded_in_4bit = True model.is_4bit_serializable = self.is_serializable() return model def is_serializable(self, safe_serialization=None): _is_4bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.41.3") if not _is_4bit_serializable: logger.warning( "You are calling `save_pretrained` to a 4-bit converted model, but your `bitsandbytes` version doesn't support it. " "If you want to save 4-bit models, make sure to have `bitsandbytes>=0.41.3` installed." ) return False return True @cached_property def is_bnb_supports_quant_storage_module(self) -> bool: """ determines if the current version of bitsandbytes supports the `module` parameter in `Params4bit.from_prequantized` :return: """ return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.43.3") @property def is_trainable(self) -> bool: return True def _dequantize(self, model): from ..integrations import dequantize_and_replace model = dequantize_and_replace( model, self.modules_to_not_convert, quantization_config=self.quantization_config ) return model
class_definition
1,246
16,330
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_4bit.py
null
344
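The Bnb4BitHfQuantizer above is only ever driven through `from_pretrained` with a `BitsAndBytesConfig`. A minimal usage sketch follows; the model id is a placeholder, and the `nf4` quant type plus bfloat16 compute dtype are common but optional choices, not requirements of the quantizer itself.

# Sketch: loading a causal LM in 4-bit with bitsandbytes.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",             # or "fp4"
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",                   # placeholder checkpoint
    quantization_config=bnb_config,
    device_map="auto",
)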
class TorchAoHfQuantizer(HfQuantizer): """ Quantizer for torchao: https://github.com/pytorch/ao/ """ requires_parameters_quantization = True requires_calibration = False required_packages = ["torchao"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) def validate_environment(self, *args, **kwargs): if not is_torchao_available(): raise ImportError("Loading an torchao quantized model requires torchao library (`pip install torchao`)") self.offload = False device_map = kwargs.get("device_map", None) if isinstance(device_map, dict): if "cpu" in device_map.values() or "disk" in device_map.values(): if self.pre_quantized: raise ValueError( "You are attempting to perform cpu/disk offload with a pre-quantized torchao model " "This is not supported yet . Please remove the CPU or disk device from the device_map." ) else: self.offload = True if self.pre_quantized: weights_only = kwargs.get("weights_only", None) if weights_only: torch_version = version.parse(importlib.metadata.version("torch")) if torch_version < version.parse("2.5.0"): raise RuntimeError( f"In order to use torchao pre-quantized model, you need to have torch>=2.5.0. However, the current version is {torch_version}." f" You can also set with `weights_only=False` in `from_pretrained` if you don't want to update torch" ) def update_torch_dtype(self, torch_dtype): if self.quantization_config.quant_type == "int4_weight_only": if torch_dtype is not None and torch_dtype != torch.bfloat16: logger.warning_once( f"Setting torch_dtype to {torch_dtype} for int4_weight_only quantization, but only bfloat16 is supported right now. Please set the torch_dtype to bfloat16." ) if torch_dtype is None: logger.warning_once( "Setting torch_dtype to torch.bfloat16 for int4_weight_only quantization since only bfloat16 is supported right now. Please set torch_dtype=torch.bfloat16 to remove this warning." ) torch_dtype = torch.bfloat16 if self.quantization_config.quant_type == "int8_dynamic_activation_int8_weight": if torch_dtype is None: logger.info( "Setting torch_dtype to torch.float32 for int8_dynamic_activation_int8_weight quantization as no torch_dtype was specified in from_pretrained" ) # we need to set the torch_dtype, otherwise we have dtype mismatch when performing the quantized linear op torch_dtype = torch.float32 return torch_dtype def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"): from accelerate.utils import CustomDtype map_to_target_dtype = { "int4_weight_only": CustomDtype.INT4, "int8_weight_only": torch.int8, "int8_dynamic_activation_int8_weight": torch.int8, } return map_to_target_dtype[self.quantization_config.quant_type] else: raise ValueError( "You are using `device_map='auto'` on a torchao quantized model. To automatically compute" " the appropriate device map, you should upgrade your `accelerate` library with " "`pip install --upgrade accelerate`" ) def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: # need more space for the quantization parameters (e.g. scale). 
Tested with int4 wo and group size = 128 max_memory = {key: val * 0.9 for key, val in max_memory.items()} return max_memory def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs): from ..integrations import get_keys_to_not_convert self.modules_to_not_convert = get_keys_to_not_convert(model) if self.quantization_config.modules_to_not_convert is not None: self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert) return def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ) -> bool: param_device = kwargs.pop("param_device", None) # check if the param_name is not in self.modules_to_not_convert if any((key + "." in param_name) or (key == param_name) for key in self.modules_to_not_convert): return False elif param_device == "cpu" and self.offload: # We don't quantize weights that we offload return False else: # we only quantize the weight of nn.Linear module, tensor_name = get_module_from_name(model, param_name) return isinstance(module, torch.nn.Linear) and (tensor_name == "weight") def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: List[str], ): """ Each nn.Linear layer that needs to be quantized is processsed here. First, we set the value the weight tensor, then we move it to the target device. Finally, we quantize the module. """ from torchao.quantization import quantize_ module, tensor_name = get_module_from_name(model, param_name) if self.pre_quantized: module._parameters[tensor_name] = torch.nn.Parameter(param_value.to(device=target_device)) if isinstance(module, nn.Linear): module.extra_repr = types.MethodType(_linear_extra_repr, module) else: module._parameters[tensor_name] = torch.nn.Parameter(param_value).to(device=target_device) quantize_(module, self.quantization_config.get_apply_tensor_subclass()) def _process_model_after_weight_loading(self, model, **kwargs): """No process required for torchao quantized model""" return def is_serializable(self, safe_serialization=None): if safe_serialization: logger.warning( "torchao quantized model does not support safe serialization, " "please set `safe_serialization` to False" ) return False _is_torchao_serializable = version.parse(importlib.metadata.version("huggingface_hub")) >= version.parse( "0.25.0" ) if not _is_torchao_serializable: logger.warning("torchao quantized model is only serializable after huggingface_hub >= 0.25.0 ") if self.offload and self.quantization_config.modules_to_not_convert is None: logger.warning( "The model contains offloaded modules and these modules are not quantized. We don't recommend saving the model as we won't be able to reload them." "If you want to specify modules to not quantize, please specify modules_to_not_convert in the quantization_config." ) return False return _is_torchao_serializable @property def is_trainable(self): supported_quant_types_for_training = [ "int8_weight_only", "int8_dynamic_activation_int8_weight", ] return self.quantization_config.quant_type in supported_quant_types_for_training
class_definition
2,144
10,119
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_torchao.py
null
345
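A minimal sketch of what drives TorchAoHfQuantizer from user code, assuming transformers' `TorchAoConfig` with the "int4_weight_only" quant type handled in `update_torch_dtype` above; the group size of 128 echoes the value the quantizer notes it was tested with, and the model id is a placeholder. bfloat16 is passed explicitly because the quantizer warns that int4 weight-only currently only supports bfloat16.

import torch
from transformers import AutoModelForCausalLM, TorchAoConfig

quant_config = TorchAoConfig("int4_weight_only", group_size=128)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",                   # placeholder checkpoint
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)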
class GptqHfQuantizer(HfQuantizer): """ Quantizer of the GPTQ method - for GPTQ the quantizer support calibration of the model through `auto_gptq` or `gptqmodel` package. Quantization is done under the hood for users if they load a non-prequantized model. """ requires_calibration = False required_packages = ["optimum", "auto_gptq", "gptqmodel"] optimum_quantizer = None def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs): super().__init__(quantization_config, **kwargs) if not is_optimum_available(): raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)") from optimum.gptq import GPTQQuantizer self.optimum_quantizer = GPTQQuantizer.from_dict(self.quantization_config.to_dict_optimum()) def validate_environment(self, *args, **kwargs): if not is_optimum_available(): raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)") if is_auto_gptq_available() and is_gptqmodel_available(): logger.warning("Detected gptqmodel and auto-gptq, will use gptqmodel") gptq_supports_cpu = ( is_auto_gptq_available() and version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2") ) or is_gptqmodel_available() if not gptq_supports_cpu and not torch.cuda.is_available(): raise RuntimeError("GPU is required to quantize or run quantize model.") elif not (is_auto_gptq_available() or is_gptqmodel_available()): raise ImportError( "Loading a GPTQ quantized model requires gptqmodel (`pip install gptqmodel`) or auto-gptq (`pip install auto-gptq`) library. " ) elif is_auto_gptq_available() and version.parse(importlib.metadata.version("auto_gptq")) < version.parse( "0.4.2" ): raise ImportError( "You need a version of auto_gptq >= 0.4.2 to use GPTQ: `pip install --upgrade auto-gptq` or use gptqmodel by `pip install gptqmodel>=1.4.3`." ) elif is_gptqmodel_available() and ( version.parse(importlib.metadata.version("gptqmodel")) < version.parse("1.4.3") or version.parse(importlib.metadata.version("optimum")) < version.parse("1.23.99") ): raise ImportError("The gptqmodel version should be >= 1.4.3, optimum version should >= 1.24.0") def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: torch_dtype = torch.float16 logger.info("Loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually.") elif torch_dtype != torch.float16: logger.info("We suggest you to set `torch_dtype=torch.float16` for better efficiency with GPTQ.") return torch_dtype def update_device_map(self, device_map): if device_map is None: device_map = {"": torch.device("cpu")} # Only with auto-gptq do not support CPU, we should move the model to cuda if available. 
if not is_gptqmodel_available() and device_map in ("cpu", {"": torch.device("cpu")}): device_map = {"": 0} return device_map def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs): if model.__class__.main_input_name != "input_ids": raise RuntimeError("We can only quantize pure text models.") if self.pre_quantized: model = self.optimum_quantizer.convert_model(model, **kwargs) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): if self.pre_quantized: model = self.optimum_quantizer.post_init_model(model) else: if self.quantization_config.tokenizer is None: self.quantization_config.tokenizer = model.name_or_path self.optimum_quantizer.quantize_model(model, self.quantization_config.tokenizer) model.config.quantization_config = GPTQConfig.from_dict(self.optimum_quantizer.to_dict()) @property def is_trainable(self, model: Optional["PreTrainedModel"] = None): return True def is_serializable(self, safe_serialization=None): return True
class_definition
1,082
5,430
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_gptq.py
null
346
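GptqHfQuantizer can quantize a non-prequantized model on the fly, which requires a calibration dataset and tokenizer. A minimal sketch, assuming optimum plus gptqmodel or auto-gptq are installed as validated above; the checkpoint and the "c4" calibration set are placeholders.

from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "facebook/opt-125m"             # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=gptq_config,
    device_map="auto",
)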
class QuantoHfQuantizer(HfQuantizer): """ Quantizer for the quanto library """ required_packages = ["quanto", "accelerate"] requires_parameters_quantization = True requires_calibration = False def __init__(self, quantization_config: QuantoConfig, **kwargs): super().__init__(quantization_config, **kwargs) self.post_init() def post_init(self): r""" Safety checker """ if self.quantization_config.activations is not None and not self.pre_quantized: raise ValueError( "We don't support quantizing the activations with transformers library." "Use quanto library for more complex use cases such as activations quantization, calibration and quantization aware training." ) def validate_environment(self, *args, **kwargs): if not is_optimum_quanto_available(): raise ImportError( "Loading an optimum-quanto quantized model requires optimum-quanto library (`pip install optimum-quanto`)" ) if not is_accelerate_available(): raise ImportError( "Loading an optimum-quanto quantized model requires accelerate library (`pip install accelerate`)" ) def update_device_map(self, device_map): if device_map is None: device_map = {"": "cpu"} logger.info( "The device_map was not initialized. " "Setting device_map to {'':'cpu'}. " "If you want to use the model for inference, please set device_map ='auto'" ) return device_map def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: logger.info("You did not specify `torch_dtype` in `from_pretrained`. Setting it to `torch.float32`.") torch_dtype = torch.float32 return torch_dtype def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]: if is_optimum_quanto_available(): from optimum.quanto import QModuleMixin not_missing_keys = [] for name, module in model.named_modules(): if isinstance(module, QModuleMixin): for missing in missing_keys: if ( (name in missing or name in f"{prefix}.{missing}") and not missing.endswith(".weight") and not missing.endswith(".bias") ): not_missing_keys.append(missing) return [k for k in missing_keys if k not in not_missing_keys] def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ) -> bool: """ Check if a parameter needs to be quantized. """ if is_optimum_quanto_available(): from optimum.quanto import QModuleMixin device_map = kwargs.get("device_map", None) param_device = kwargs.get("param_device", None) # we don't quantize the model if the module is going to be offloaded to the cpu if device_map is not None and param_device is not None: device_map_values = set(device_map.values()) if param_device == "cpu" and len(device_map_values) > 1: if not (device_map_values == {"cpu"} or device_map_values == {"cpu", "disk"}): return False module, tensor_name = get_module_from_name(model, param_name) # We only quantize the weights and the bias is not quantized. if isinstance(module, QModuleMixin) and "weight" in tensor_name: # if the weights are quantized, don't need to recreate it again with `create_quantized_param` return not module.frozen else: return False def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", *args, **kwargs, ): """ Create the quantized parameter by calling .freeze() after setting it to the module. 
""" from accelerate.utils import set_module_tensor_to_device set_module_tensor_to_device(model, param_name, target_device, param_value) module, _ = get_module_from_name(model, param_name) module.freeze() module.weight.requires_grad = False def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.27.0"): from accelerate.utils import CustomDtype mapping = { "int8": torch.int8, "float8": CustomDtype.FP8, "int4": CustomDtype.INT4, "int2": CustomDtype.INT2, } target_dtype = mapping[self.quantization_config.weights] return target_dtype else: raise ValueError( "You are using `device_map='auto'` on an optimum-quanto quantized model. To automatically compute" " the appropriate device map, you should upgrade your `accelerate` library," "`pip install --upgrade accelerate` or install it from source." ) def _process_model_before_weight_loading( self, model: "PreTrainedModel", keep_in_fp32_modules: List[str] = [], **kwargs ): from ..integrations import get_keys_to_not_convert, replace_with_quanto_layers # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if self.quantization_config.modules_to_not_convert is None: self.modules_to_not_convert = get_keys_to_not_convert(model) else: self.modules_to_not_convert = self.quantization_config.modules_to_not_convert if not isinstance(self.modules_to_not_convert, list): self.modules_to_not_convert = [self.modules_to_not_convert] self.modules_to_not_convert.extend(keep_in_fp32_modules) model, _ = replace_with_quanto_layers( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config ) model.config.quantization_config = self.quantization_config def _process_model_after_weight_loading(self, model, **kwargs): return model @property def is_trainable(self, model: Optional["PreTrainedModel"] = None): return True def is_serializable(self, safe_serialization=None): return False
class_definition
1,139
8,106
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_quanto.py
null
347
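QuantoHfQuantizer only quantizes weights (the `post_init` check above rejects activation quantization). A minimal sketch, assuming transformers' `QuantoConfig`; "int8" is one of the weight dtypes mapped in `adjust_target_dtype`, and the model id is a placeholder.

from transformers import AutoModelForCausalLM, QuantoConfig

quanto_config = QuantoConfig(weights="int8")
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",                   # placeholder checkpoint
    quantization_config=quanto_config,
    device_map="auto",
)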
class EetqHfQuantizer(HfQuantizer): """ 8-bit quantization from EETQ quantization method: before loading: converts transformer layers into W8A16Linear during loading: load 16bit weight and pass to the layer object after: quantizes individual weights in Linear8bitLt into 8bit at first .cuda() call """ requires_parameters_quantization = True requires_calibration = False required_packages = ["eetq", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_eetq_available(): raise ImportError( "Using `eetq` 8-bit quantization requires eetq." "Please install the latest version of eetq from : https://github.com/NetEase-FuXi/EETQ" ) try: import eetq # noqa: F401 except ImportError as exc: if "shard_checkpoint" in str(exc): # EETQ 1.0.0 is currently broken with the latest transformers because it tries to import the removed # shard_checkpoint function, see https://github.com/NetEase-FuXi/EETQ/issues/34. # TODO: Update message once eetq releases a fix raise ImportError( "You are using a version of EETQ that is incompatible with the current transformers version. " "Either downgrade transformers to <= v4.46.3 or, if available, upgrade EETQ to > v1.0.0." ) from exc else: raise if not is_accelerate_available(): raise ImportError("Loading an EETQ quantized model requires accelerate (`pip install accelerate`)") if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Converting into 8-bit weights from tf/flax weights is currently not supported, please make" " sure the weights are in PyTorch format." ) if not torch.cuda.is_available(): raise RuntimeError("No GPU found. A GPU is needed for quantization.") device_map = kwargs.get("device_map", None) if device_map is None: logger.warning_once( "You have loaded an EETQ model on CPU and have a CUDA device available, make sure to set " "your model on a GPU device in order to run your model." ) elif device_map is not None: if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()): raise ValueError( "You are attempting to load an EETQ model with a device_map that contains a CPU or disk device." " This is not supported. Please remove the CPU or disk device from the device_map." ) def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: torch_dtype = torch.float16 logger.info( "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to " "requirements of `eetq` to enable model loading in 8-bit. 
" "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" " torch_dtype=torch.float16 to remove this warning.", torch_dtype, ) elif torch_dtype != torch.float16: logger.info("We suggest you to set `torch_dtype=torch.float16` for better efficiency with EETQ.") return torch_dtype def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ): from eetq import EetqLinear module, tensor_name = get_module_from_name(model, param_name) if isinstance(module, EetqLinear): if self.pre_quantized or tensor_name == "bias": if tensor_name == "weight" and param_value.dtype != torch.int8: raise ValueError("Expect quantized weights but got an unquantized weight") return False else: if tensor_name == "weight_scale": raise ValueError("Expect unquantized weights but got a quantized weight_scale") return True return False def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: Optional[List[str]] = None, ): """ quantizes weights into qweight and weight_scales """ from eetq import quantize_and_preprocess_weights module, tensor_name = get_module_from_name(model, param_name) new_value, weight_scale = quantize_and_preprocess_weights(param_value) module._buffers[tensor_name] = new_value.to(target_device) module.register("weight_scales", weight_scale.to(target_device)) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): return model def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from ..integrations import get_keys_to_not_convert, replace_with_eetq_linear self.modules_to_not_convert = get_keys_to_not_convert(model) if self.quantization_config.modules_to_not_convert is not None: self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert) model = replace_with_eetq_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config, pre_quantized=self.pre_quantized, ) model.config.quantization_config = self.quantization_config def is_serializable(self, safe_serialization=None): return True @property def is_trainable(self) -> bool: return True
class_definition
1,001
7,325
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_eetq.py
null
348
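A minimal sketch of 8-bit weight-only quantization with EETQ, assuming transformers' `EetqConfig` and a CUDA device (required by `validate_environment` above); the model id is a placeholder.

from transformers import AutoModelForCausalLM, EetqConfig

eetq_config = EetqConfig("int8")
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",                   # placeholder checkpoint
    quantization_config=eetq_config,
    device_map="auto",
)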
class BitNetHfQuantizer(HfQuantizer): """ 1.58-bit quantization from BitNet quantization method: Before loading: it converts the linear layers into BitLinear layers during loading. Checkout the paper introducing this method : https://arxiv.org/pdf/2402.17764 """ requires_parameters_quantization = False requires_calibration = True required_packages = ["accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_accelerate_available(): raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)") if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Loading ternary weights from tf/flax is currently not supported, please make" " sure the weights are in PyTorch format." ) if not torch.cuda.is_available(): logger.warning_once( "You don't have a GPU available to load the model, the inference will be slow because of weight unpacking" ) return device_map = kwargs.get("device_map", None) if device_map is None: logger.warning_once( "You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set " "your model on a GPU device in order to run your model." ) elif device_map is not None: if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()): raise ValueError( "You are attempting to load a BitNet model with a device_map that contains a CPU or disk device." "This is not supported. Please remove the CPU or disk device from the device_map." ) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): return model def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from ..integrations import get_keys_to_not_convert, replace_with_bitnet_linear self.modules_to_not_convert = get_keys_to_not_convert(model) if self.quantization_config.modules_to_not_convert is not None: self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert) model = replace_with_bitnet_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config, pre_quantized=self.pre_quantized, ) def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": target_dtype = torch.int8 return target_dtype def is_serializable(self, safe_serialization=None): return True @property def is_trainable(self) -> bool: return False
class_definition
923
4,301
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bitnet.py
null
349
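Because BitNetHfQuantizer sets `requires_calibration = True`, it can only load checkpoints that were already quantized to ternary weights; the quantization config is read from the checkpoint. A minimal sketch with a purely hypothetical repo id:

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-bitnet-1.58bit-model",  # placeholder pre-quantized checkpoint
    device_map="auto",
)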
class AqlmHfQuantizer(HfQuantizer): """ Quantizer of the AQLM method. Enables the loading of prequantized models. """ requires_calibration = True required_packages = ["aqlm"] optimum_quantizer = None def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_accelerate_available(): raise ImportError("Using `aqlm` quantization requires Accelerate: `pip install accelerate`") if not is_aqlm_available(): raise ImportError("Using `aqlm` quantization requires AQLM: `pip install aqlm[gpu,cpu]`") def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: if torch.cuda.is_available(): torch_dtype = torch.float16 logger.info( "CUDA available. Assuming AQLM inference on GPU and loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually." ) else: torch_dtype = torch.float32 logger.info( "CUDA is unavailable. Assuming AQLM inference on CPU and loading the model in `torch.float32`. To overwrite it, set `torch_dtype` manually." ) return torch_dtype def _process_model_before_weight_loading( self, model: "PreTrainedModel", **kwargs, ): replace_with_aqlm_linear( model, quantization_config=self.quantization_config, linear_weights_not_to_quantize=self.quantization_config.linear_weights_not_to_quantize, ) model.config.quantization_config = self.quantization_config def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): return model @property def is_trainable(self, model: Optional["PreTrainedModel"] = None): aqlm_supports_training = version.parse(importlib.metadata.version("aqlm")) >= version.parse("1.0.2") if aqlm_supports_training: return True else: logger.warning( f"Currently installed `aqlm` version ({importlib.metadata.version('aqlm')}) doesn't support training. If you wish to train a quantized model, please update `aqlm` with `pip install aqlm>=1.0.2`" ) return False def is_serializable(self, safe_serialization=None): return True
class_definition
1,096
3,691
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_aqlm.py
null
350
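AqlmHfQuantizer likewise only loads pre-quantized models (`requires_calibration = True`), and `update_torch_dtype` above picks float16 on CUDA or float32 on CPU when no dtype is given. A minimal sketch after `pip install aqlm[gpu,cpu]`, with a placeholder repo id:

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-aqlm-2bit-model",       # placeholder pre-quantized checkpoint
    device_map="auto",
)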
class HqqHfQuantizer(HfQuantizer): """ HQQ quantizer base HF class. nn.Linear modules are first tagged with quant_config in _process_model_before_weight_loading(). The actual quantization and offloading to the GPU is done in check_quantized_param(). """ use_keep_in_fp32_modules = False requires_parameters_quantization = True requires_calibration = False required_packages = ["hqq"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.torch_dtype = None self.using_multi_gpu = False def validate_environment(self, *args, **kwargs): if not (is_hqq_available()): raise ImportError( "A valid HQQ version (>=0.2.1) is not available. Please follow the instructions to install it: `https://github.com/mobiusml/hqq/`." ) if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Converting weights from tf/flax weights is currently not supported, please make" " sure the weights are in PyTorch format." ) if not torch.cuda.is_available(): raise RuntimeError("No GPU found. A GPU is needed for quantization.") if self.torch_dtype is None: if "torch_dtype" in kwargs: self.torch_dtype = kwargs["torch_dtype"] else: self.torch_dtype = torch.float32 logger.info("Setting torch_dtype to torch.float32 as the default value since it was not specified.") device_map = kwargs.get("device_map", None) if isinstance(device_map, dict): if "cpu" in device_map.values() or "disk" in device_map.values(): raise ValueError( "You are attempting to use an HQQ model with a device_map that contains a CPU or disk device." " This is not supported. Please remove the CPU or disk device from the device_map." ) else: self.using_multi_gpu = len(set(device_map.values())) > 1 def update_missing_keys( self, model: "PreTrainedModel", missing_keys: List[str], prefix: str, **kwargs ) -> List[str]: if self.pre_quantized: return [key for key in missing_keys if ("weight" not in key)] else: return missing_keys # Adds missing keys for HQQLinear modules that are loaded but the model with initialized with torch.nn.Linear def update_expected_keys( self, model: "PreTrainedModel", expected_keys: List[str], loaded_keys: List[str] ) -> List[str]: if not self.pre_quantized: return expected_keys # Collects all quantizable (linear) layers def _find_hqq_quantizable_layers(model, layers): for name, module in model.named_children(): if isinstance(module, (torch.nn.Linear)): layers.add(module.name) _find_hqq_quantizable_layers(module, layers) new_keys = set(expected_keys) if is_hqq_available(): from hqq.core.quantize import HQQLinear # Name modules for name, module in model.named_modules(): module.name = name # valid modules are Linear layers that have HQQLinear state_dict. We ignore skip_modules and any layers with Linear state_dict() params _valid_modules = set() _find_hqq_quantizable_layers(model, _valid_modules) _valid_modules -= set(model.config.quantization_config["skip_modules"]) # Append new expected layers based on _ref_keys _ref_keys = HQQLinear( linear_layer=None, quant_config=None, compute_dtype=torch.float16, device="cpu" ).state_dict_keys() - {"bias"} # Clean-up _rm_keys = set() for key in new_keys: if any(_module in key for _module in _valid_modules): _rm_keys.add(key) new_keys -= _rm_keys # At this point, new_keys contains all the keys of the layers that are NOT HQQLinear or torch.nn.Linear # Re-populate Linear/HQQLinear for _module in _valid_modules: if _module + ".weight" in loaded_keys: new_keys.add(_module + ".weight") else: new_keys.update({_module + "." 
+ _ref_key for _ref_key in _ref_keys}) if _module + ".bias" in loaded_keys: new_keys.add(_module + ".bias") return list(new_keys) def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ) -> bool: if is_hqq_available(): from hqq.core.quantize import HQQLinear module, tensor_name = get_module_from_name(model, param_name) if self.pre_quantized: return ( (isinstance(module, torch.nn.Linear) or isinstance(module, HQQLinear)) and tensor_name != "weight" and tensor_name != "bias" ) else: return isinstance(module, torch.nn.Linear) and tensor_name == "weight" def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: List[str], ): """ Each nn.Linear layer is processsed here. We first check if the corresponding module state_dict contains already HQQ quantized parameters. If not, we create a temp linear layer with the module state_dict params and use it for quantization """ if is_hqq_available(): from hqq.core.quantize import HQQLinear module, tensor_name = get_module_from_name(model, param_name) layer_name = ".".join(param_name.split(".")[:-1]) parent_module = find_parent(model, layer_name) node = layer_name.split(".")[-1] # set module state_dict module_state_dict = {} for k, v in state_dict.items(): if layer_name + "." in k: module_state_dict[k.split(".")[-1]] = v if unexpected_keys is not None and k in unexpected_keys: unexpected_keys.remove(k) if self.pre_quantized: if isinstance(module, HQQLinear): return else: hqq_layer = HQQLinear( linear_layer=None, quant_config=None, compute_dtype=self.torch_dtype, device=target_device, ) hqq_layer.load_state_dict(module_state_dict) if hqq_layer.bias is not None and isinstance(hqq_layer.bias, torch.Tensor): hqq_layer.bias = torch.nn.Parameter(hqq_layer.bias) if self.using_multi_gpu: hqq_layer = self._patch_layer_for_multigpu(hqq_layer) setattr(parent_module, node, hqq_layer) # cleanup del module.__dict__, module torch.cuda.empty_cache() return # Step 1: populate module with weight/bias from module state dict for key in module_state_dict: setattr(module, key, torch.nn.Parameter(module_state_dict[key])) # Step 2: Replace module with either HQQLinear or move it to device. We do this via setattr on the parent as doing on it on the module # directly doesn't work. if hasattr(module, "quant_config"): hqq_layer = HQQLinear( module, module.quant_config, compute_dtype=self.torch_dtype, device=target_device, del_orig=True, ) if hqq_layer.bias is not None and isinstance(hqq_layer.bias, torch.Tensor): hqq_layer.bias = torch.nn.Parameter(hqq_layer.bias) if self.using_multi_gpu: hqq_layer = self._patch_layer_for_multigpu(hqq_layer) setattr(parent_module, node, hqq_layer) else: module = module.to(dtype=self.torch_dtype, device=target_device) setattr(parent_module, node, module) torch.cuda.empty_cache() # Remove accelerate hook and uses a simpler forward pass. 
Otherwise, this breaks with multi-gpu def _patch_layer_for_multigpu(self, hqq_layer): hqq_layer = remove_hook_from_module(hqq_layer) def forward_with_device(self, x): out = torch.matmul(x.to(self.device), self.dequantize().t()) if self.bias is not None: out += self.bias return out hqq_layer.forward = lambda x: forward_with_device(hqq_layer, x) return hqq_layer def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = None, **kwargs, ): keep_in_fp32_modules = keep_in_fp32_modules if keep_in_fp32_modules is not None else [] # Add the corresponding quant_config to each valid module. This allows us to do the actual nn.Linear -> HQQLinear conversion in create_quantized_param(). # prepare_for_hqq_linear() also sets the right quantization config inside the model (model.config.quantization_config) and the layers (hqq_layer.quant_config) model = prepare_for_hqq_linear(model, quantization_config=self.quantization_config) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): model.is_hqq_quantized = True model.is_hqq_serializable = self.is_serializable() return model def is_serializable(self, safe_serialization=None): return True @property def is_trainable(self) -> bool: return True
class_definition
1,341
11,437
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_hqq.py
null
351
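A minimal sketch of on-the-fly HQQ quantization, assuming transformers' `HqqConfig` with `nbits`/`group_size` arguments and a CUDA device (required by `validate_environment` above); the model id and the 4-bit/group-size-64 settings are illustrative placeholders.

import torch
from transformers import AutoModelForCausalLM, HqqConfig

hqq_config = HqqConfig(nbits=4, group_size=64)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",                   # placeholder checkpoint
    quantization_config=hqq_config,
    torch_dtype=torch.float16,
    device_map="cuda",
)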
class VptqHfQuantizer(HfQuantizer): """ Quantizer of the VPTQ method. Enables the loading of prequantized models. """ requires_calibration = True required_packages = ["vptq"] def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_accelerate_available(): raise ImportError("Using `vptq` quantization requires Accelerate: `pip install accelerate`") if not is_vptq_available(): raise ImportError("Using `vptq` quantization requires VPTQ>=0.0.4: `pip install -U vptq`") def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: if torch.cuda.is_available(): torch_dtype = torch.float16 logger.info( "CUDA available. Assuming VPTQ inference on GPU and loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually." ) else: import vptq device_availability = getattr(vptq, "device_availability", lambda device: False) if device_availability("cpu") is True: raise RuntimeError("No GPU found. Please wait for the next release of VPTQ to use CPU inference") torch_dtype = torch.float32 logger.info("No GPU found. Assuming VPTQ inference on CPU and loading the model in `torch.float32`.") return torch_dtype def _process_model_before_weight_loading( self, model: "PreTrainedModel", **kwargs, ): """ we don't have param like modules_to_not_convert to indicate which layers should not be quantized because `quantization_config` include the layers that should be quantized """ from ..integrations import replace_with_vptq_linear modules_to_not_convert = kwargs.get("modules_to_not_convert", []) + ( self.quantization_config.modules_to_not_convert or [] ) replace_with_vptq_linear( model, quantization_config=self.quantization_config, modules_to_not_convert=modules_to_not_convert, ) model.config.quantization_config = self.quantization_config def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): return model @property def is_trainable(self, model: Optional["PreTrainedModel"] = None): return False def is_serializable(self, safe_serialization=None): return True
class_definition
996
3,719
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_vptq.py
null
352
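VptqHfQuantizer is load-only (`requires_calibration = True`), so user code simply points `from_pretrained` at a checkpoint that was already quantized with VPTQ after `pip install -U vptq`. A minimal sketch with a hypothetical repo id:

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-vptq-quantized-model",  # placeholder pre-quantized checkpoint
    device_map="auto",
)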
class Bnb8BitHfQuantizer(HfQuantizer): """ 8-bit quantization from bitsandbytes quantization method: before loading: converts transformer layers into Linear8bitLt during loading: load 16bit weight and pass to the layer object after: quantizes individual weights in Linear8bitLt into 8bit at fitst .cuda() call saving: from state dict, as usual; saves weights and 'SCB' component loading: need to locate SCB component and pass to the Linear8bitLt object """ use_keep_in_fp32_modules = True requires_parameters_quantization = True requires_calibration = False required_packages = ["bitsandbytes", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) if self.quantization_config.llm_int8_skip_modules is not None: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules def validate_environment(self, *args, **kwargs): if not is_accelerate_available(): raise ImportError( f"Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`" ) if not is_bitsandbytes_available(): raise ImportError( "Using `bitsandbytes` 8-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`" ) from ..integrations import validate_bnb_backend_availability from ..utils import is_bitsandbytes_multi_backend_available bnb_multibackend_is_enabled = is_bitsandbytes_multi_backend_available() validate_bnb_backend_availability(raise_exception=True) if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make" " sure the weights are in PyTorch format." ) device_map = kwargs.get("device_map", None) if ( device_map is not None and isinstance(device_map, dict) and not self.quantization_config.llm_int8_enable_fp32_cpu_offload ): device_map_without_lm_head = { key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert } if set(device_map.values()) == {"cpu"} and bnb_multibackend_is_enabled: pass elif "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): raise ValueError( "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the " "quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules " "in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to " "`from_pretrained`. Check " "https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu " "for more details. " ) if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.2"): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 8bit inference and training" " make sure you have the latest version of `bitsandbytes` installed" ) def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: # need more space for buffers that are created during quantization max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` logger.info( "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to " "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. 
" "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" " torch_dtype=torch.float16 to remove this warning.", torch_dtype, ) torch_dtype = torch.float16 return torch_dtype def update_device_map(self, device_map): if device_map is None: if torch.cuda.is_available(): device_map = {"": torch.cuda.current_device()} elif is_torch_xpu_available(): device_map = {"": f"xpu:{torch.xpu.current_device()}"} else: device_map = {"": "cpu"} logger.info( "The device_map was not initialized. " f"Setting device_map to {device_map}. " "If you want to use the model for inference, please set device_map ='auto' " ) return device_map def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if target_dtype != torch.int8: logger.info("target_dtype {target_dtype} is replaced by `torch.int8` for 8-bit BnB quantization") return torch.int8 def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ): import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Int8Params): if self.pre_quantized: if param_name.replace("weight", "SCB") not in state_dict.keys(): raise ValueError("Missing quantization component `SCB`") if param_value.dtype != torch.int8: raise ValueError( f"Incompatible dtype `{param_value.dtype}` when loading 8-bit prequantized weight. Expected `torch.int8`." ) return True return False def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: Optional[List[str]] = None, ): """ combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device() needs aux items from state dicts, if found - removes them from unexpected_keys """ import bitsandbytes as bnb fp16_statistics_key = param_name.replace("weight", "SCB") fp16_weights_format_key = param_name.replace("weight", "weight_format") fp16_statistics = state_dict.get(fp16_statistics_key, None) fp16_weights_format = state_dict.get(fp16_weights_format_key, None) module, tensor_name = get_module_from_name(model, param_name) if tensor_name not in module._parameters: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") old_value = getattr(module, tensor_name) if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params): raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.") if ( old_value.device == torch.device("meta") and target_device not in ["meta", torch.device("meta")] and param_value is None ): raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.") new_value = param_value.to("cpu") if self.pre_quantized and not self.is_serializable(): raise ValueError( "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls, Conv1D): if fp16_statistics is None: new_value = new_value.T kwargs = old_value.__dict__ new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device) module._parameters[tensor_name] = new_value if fp16_statistics is not None: setattr(module.weight, "SCB", fp16_statistics.to(target_device)) if unexpected_keys is not None: unexpected_keys.remove(fp16_statistics_key) # We just need to pop the `weight_format` keys from the state dict to remove unneeded # messages. The correct format is correctly retrieved during the first forward pass. if fp16_weights_format is not None and unexpected_keys is not None: unexpected_keys.remove(fp16_weights_format_key) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): model.is_loaded_in_8bit = True model.is_8bit_serializable = self.is_serializable() return model def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear llm_int8_enable_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if self.quantization_config.llm_int8_skip_modules is None: self.modules_to_not_convert = get_keys_to_not_convert(model) else: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules if not isinstance(self.modules_to_not_convert, list): self.modules_to_not_convert = [self.modules_to_not_convert] self.modules_to_not_convert.extend(keep_in_fp32_modules) # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk` if isinstance(device_map, dict) and len(device_map.keys()) > 1: keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] if len(keys_on_cpu) > 0 and not llm_int8_enable_fp32_cpu_offload: raise ValueError( "If you want to offload some keys to `cpu` or `disk`, you need to set " "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be " " converted to 8-bit but kept in 32-bit." ) self.modules_to_not_convert.extend(keys_on_cpu) model = replace_with_bnb_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config ) # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbyter.py to here model.config.quantization_config = self.quantization_config def is_serializable(self, safe_serialization=None): _bnb_supports_8bit_serialization = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse( "0.37.2" ) if not _bnb_supports_8bit_serialization: logger.warning( "You are calling `save_pretrained` to a 8-bit converted model, but your `bitsandbytes` version doesn't support it. " "If you want to save 8-bit models, make sure to have `bitsandbytes>0.37.2` installed. You will most likely face errors or" " unexpected behaviours." ) return False return True @property def is_trainable(self) -> bool: return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.37.0") def _dequantize(self, model): from ..integrations import dequantize_and_replace model = dequantize_and_replace( model, self.modules_to_not_convert, quantization_config=self.quantization_config ) return model
class_definition
1,180
13,893
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_bnb_8bit.py
null
353
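A minimal sketch of 8-bit loading with bitsandbytes, including the fp32 CPU-offload flag that the `validate_environment` check above refers to; the model id is a placeholder, and the offload flag is only needed when the device_map places some modules on CPU or disk.

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_enable_fp32_cpu_offload=True,  # only needed when offloading modules to CPU/disk
)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",                    # placeholder checkpoint
    quantization_config=bnb_config,
    device_map="auto",
)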
class HfQuantizer(ABC): """ Abstract class of the HuggingFace quantizer. Supports for now quantizing HF transformers models for inference and/or quantization. This class is used only for transformers.PreTrainedModel.from_pretrained and cannot be easily used outside the scope of that method yet. Attributes quantization_config (`transformers.utils.quantization_config.QuantizationConfigMixin`): The quantization config that defines the quantization parameters of your model that you want to quantize. modules_to_not_convert (`List[str]`, *optional*): The list of module names to not convert when quantizing the model. required_packages (`List[str]`, *optional*): The list of required pip packages to install prior to using the quantizer requires_calibration (`bool`): Whether the quantization method requires to calibrate the model before using it. requires_parameters_quantization (`bool`): Whether the quantization method requires to create a new Parameter. For example, for bitsandbytes, it is required to create a new xxxParameter in order to properly quantize the model. """ requires_calibration = False required_packages = None requires_parameters_quantization = False def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs): self.quantization_config = quantization_config # -- Handle extra kwargs below -- self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", []) self.pre_quantized = kwargs.pop("pre_quantized", True) if not self.pre_quantized and self.requires_calibration: raise ValueError( f"The quantization method {quantization_config.quant_method} does require the model to be pre-quantized." f" You explicitly passed `pre_quantized=False` meaning your model weights are not quantized. Make sure to " f"pass `pre_quantized=True` while knowing what you are doing." ) def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": """ Some quantization methods require to explicitly set the dtype of the model to a target dtype. You need to override this method in case you want to make sure that behavior is preserved Args: torch_dtype (`torch.dtype`): The input dtype that is passed in `from_pretrained` """ return torch_dtype def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]: """ Override this method if you want to pass a override the existing device map with a new one. E.g. for bitsandbytes, since `accelerate` is a hard requirement, if no device_map is passed, the device_map is set to `"auto"`` Args: device_map (`Union[dict, str]`, *optional*): The device_map that is passed through the `from_pretrained` method. """ return device_map def adjust_target_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": """ Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained` to compute the device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype` to `torch.int8` and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`. Args: torch_dtype (`torch.dtype`, *optional*): The torch_dtype that is used to compute the device_map. """ return torch_dtype def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]: """ Override this method if you want to adjust the `missing_keys`. 
Args: missing_keys (`List[str]`, *optional*): The list of missing keys in the checkpoint compared to the state dict of the model """ return missing_keys def update_expected_keys(self, model, expected_keys: List[str], loaded_keys: List[str]) -> List[str]: """ Override this method if you want to adjust the `update_expected_keys`. Args: expected_keys (`List[str]`, *optional*): The list of the expected keys in the initialized model. loaded_keys (`List[str]`, *optional*): The list of the loaded keys in the checkpoint. """ return expected_keys def get_special_dtypes_update(self, model, torch_dtype: "torch.dtype") -> Dict[str, "torch.dtype"]: """ returns dtypes for modules that are not quantized - used for the computation of the device_map in case one passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified in `_process_model_before_weight_loading`. Args: model (`~transformers.PreTrainedModel`): The model to quantize torch_dtype (`torch.dtype`): The dtype passed in `from_pretrained` method. """ return { name: torch_dtype for name, _ in model.named_parameters() if any(m in name for m in self.modules_to_not_convert) } def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: """adjust max_memory argument for infer_auto_device_map() if extra memory is needed for quantization""" return max_memory def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ) -> bool: """ checks if a loaded state_dict component is part of quantized param + some validation; only defined if requires_parameters_quantization == True for quantization methods that require to create a new parameters for quantization. """ return False def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter": """ takes needed components from state_dict and creates quantized param; only applicable if requires_parameters_quantization == True """ if not self.requires_parameters_quantization: raise AttributeError( f"`.create_quantized_param()` method is not supported by quantizer class {self.__class__.__name__}." ) def validate_environment(self, *args, **kwargs): """ This method is used to potentially check for potential conflicts with arguments that are passed in `from_pretrained`. You need to define it for all future quantizers that are integrated with transformers. If no explicit check are needed, simply return nothing. """ return def preprocess_model(self, model: "PreTrainedModel", **kwargs): """ Setting model attributes and/or converting model before weights loading. At this point the model should be initialized on the meta device so you can freely manipulate the skeleton of the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`. Args: model (`~transformers.PreTrainedModel`): The model to quantize kwargs (`dict`, *optional*): The keyword arguments that are passed along `_process_model_before_weight_loading`. """ model.is_quantized = True model.quantization_method = self.quantization_config.quant_method return self._process_model_before_weight_loading(model, **kwargs) def postprocess_model(self, model: "PreTrainedModel", **kwargs): """ Post-process the model post weights loading. Make sure to override the abstract method `_process_model_after_weight_loading`. 
        Args:
            model (`~transformers.PreTrainedModel`):
                The model to quantize
            kwargs (`dict`, *optional*):
                The keyword arguments that are passed along `_process_model_after_weight_loading`.
        """
        return self._process_model_after_weight_loading(model, **kwargs)

    def dequantize(self, model):
        """
        Potentially dequantize the model to retrieve the original model, with some loss in accuracy / performance.
        Note that not all quantization schemes support this.
        """
        model = self._dequantize(model)

        # Delete quantizer and quantization config
        del model.hf_quantizer
        del model.config.quantization_config
        del model.config._pre_quantization_dtype

        model.is_quantized = False

        return model

    def _dequantize(self, model):
        raise NotImplementedError(
            f"{self.quantization_config.quant_method} has no implementation of `dequantize`, please raise an issue on GitHub."
        )

    @property
    def is_qat_trainable(self) -> bool:
        """Flag indicating whether the quantized model can carry out quantization aware training"""
        return False

    @abstractmethod
    def _process_model_before_weight_loading(self, model, **kwargs): ...

    @abstractmethod
    def _process_model_after_weight_loading(self, model, **kwargs): ...

    @abstractmethod
    def is_serializable(self, safe_serialization=None): ...

    @property
    @abstractmethod
    def is_trainable(self): ...
class_definition
932
10,501
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/base.py
null
354
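To illustrate the abstract interface above, here is a minimal, hypothetical subclass that only fills in the required hooks; it performs no real quantization and is not registered with `AutoHfQuantizer`.

```python
from transformers.quantizers import HfQuantizer


class NoOpHfQuantizer(HfQuantizer):
    """Hypothetical quantizer that satisfies the abstract interface without changing the model."""

    requires_calibration = False

    def _process_model_before_weight_loading(self, model, **kwargs):
        # A real quantizer would swap nn.Linear modules for quantized replacements here,
        # while the model still lives on the meta device.
        return model

    def _process_model_after_weight_loading(self, model, **kwargs):
        # A real quantizer could repack weights or attach extra buffers here.
        return model

    def is_serializable(self, safe_serialization=None):
        return True

    @property
    def is_trainable(self):
        return False
```

It would be instantiated with a `QuantizationConfigMixin` instance, e.g. `NoOpHfQuantizer(some_config, pre_quantized=False)` (both names hypothetical), and then driven through `preprocess_model` / `postprocess_model` by `from_pretrained`.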
class HiggsHfQuantizer(HfQuantizer): """ Quantizer of the HIGGS method. Enables the loading of prequantized models and in-flight quantization of full-precision models. """ requires_calibration = False requires_parameters_quantization = True required_packages = ["flute-kernel", "fast_hadamard_transform"] def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, device_map, **kwargs): if not torch.cuda.is_available(): raise NotImplementedError("HIGGS quantization is only supported on GPU. Please use a different quantizer.") if not is_accelerate_available(): raise ImportError("Using `higgs` quantization requires Accelerate: `pip install accelerate`") if not is_flute_available(): raise ImportError("Using `higgs` quantization requires FLUTE: `pip install flute-kernel>=0.3.0`") if not is_hadamard_available(): raise ImportError( "Using `higgs` quantization requires fast_hadamard_transform: `pip install fast_hadamard_transform`" ) if device_map is None: raise ValueError( "You are attempting to load a HIGGS model without setting device_map." " Please set device_map comprised of 'cuda' devices." ) elif isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()): raise ValueError( "You are attempting to load a HIGGS model with a device_map that contains a CPU or disk device." " This is not supported. Please remove the CPU or disk device from the device_map." ) def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: logger.info("`torch_dtype` is None. Setting `torch_dtype=torch.float16` for FLUTE compatibility.") torch_dtype = torch.float16 elif torch_dtype != torch.float16 and torch_dtype != torch.bfloat16: raise ValueError( f"Invalid `torch_dtype` {torch_dtype}. HIGGS quantization only supports `torch_dtype=torch.float16` or `torch_dtype=torch.bfloat16`." 
) return torch_dtype def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: Optional[List[str]] = None, ): from ..integrations import quantize_with_higgs """ Quantizes weights into weight and weight_scale """ flute_dict = quantize_with_higgs( param_value.to(target_device), self.quantization_config.bits, self.quantization_config.p, self.quantization_config.group_size, self.quantization_config.hadamard_size, ) del param_value module, tensor_name = get_module_from_name(model, param_name) for key, value in flute_dict.items(): if key in module._parameters: module._parameters[key] = torch.nn.Parameter(value, requires_grad=False) elif key in module._buffers: module._buffers[key] = torch.nn.Buffer(value) else: raise ValueError(f"Unexpected key {key} in module {module}") if unexpected_keys is not None and param_name in unexpected_keys: unexpected_keys.remove(param_name) module.num_sms_packed = torch.nn.Parameter( torch.tensor(get_num_sms_from_device(target_device), device=target_device, dtype=torch.int32), requires_grad=False, ) def _process_model_before_weight_loading( self, model: "PreTrainedModel", **kwargs, ): from ..integrations import replace_with_higgs_linear replace_with_higgs_linear( model, quantization_config=self.quantization_config, ) model.config.quantization_config = self.quantization_config def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): import flute.utils from ..integrations import HiggsLinear flute_workspaces = {} for name, module in model.named_modules(): if isinstance(module, HiggsLinear): # Every HiggsLinear needs a "workspace": a buffer for the unpacking operation. # This buffer needs to be on the same device as the weights, but can be reused across modules otherwise. if module.weight.device not in flute_workspaces: flute_workspaces[module.weight.device] = flute.utils.make_workspace_streamk( device=module.weight.device ) module.workspace = flute_workspaces[module.weight.device] # FLUTE weights are packed in a way that is optimized for a specific number of SMs (GPU streaming multiprocessors). # If the model is loaded on a different device than the one it was saved on, we need to repack the weights. 
if module.num_sms_packed.item() != get_num_sms_from_device(module.weight.device): new_device = module.weight.device new_num_sms = get_num_sms_from_device(new_device) module.weight.data = flute.utils.pack( flute.utils.unpack( weight=module.weight.data, scales=module.scales.data, workspace=module.workspace, num_bits=module.num_bits, group_size=module.group_size, num_sms_packed=module.num_sms_packed.item(), ).T.contiguous(), module.num_bits, module.group_size, ) module.num_sms_packed = torch.nn.Parameter( torch.tensor(new_num_sms, device=new_device, dtype=torch.int32), requires_grad=False, ) def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]: from ..integrations import HiggsLinear not_missing_keys = [] for name, module in model.named_modules(): if isinstance(module, HiggsLinear): for missing in missing_keys: if ( (name in missing or name in f"{prefix}.{missing}") and not missing.endswith(".weight") and not missing.endswith(".bias") ): not_missing_keys.append(missing) return [k for k in missing_keys if k not in not_missing_keys] @property def is_trainable(self, model: Optional["PreTrainedModel"] = None): return False def is_serializable(self, safe_serialization=None): return True def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ) -> bool: from ..integrations import HiggsLinear module, tensor_name = get_module_from_name(model, param_name) if isinstance(module, HiggsLinear) and tensor_name == "weight" and param_value.dtype != torch.int16: # Only quantize weights of HiggsLinear modules that are not already quantized return True else: return False def _dequantize(self, model): from ..integrations import dequantize_higgs model = dequantize_higgs(model) return model
class_definition
1,583
9,576
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
null
355
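Assuming the accompanying `HiggsConfig` is available in the installed `transformers` version (HIGGS support is recent), in-flight quantization roughly looks like the sketch below. The checkpoint name is only an example; a CUDA device plus `flute-kernel` and `fast_hadamard_transform` are required, as enforced by `validate_environment` above.

```python
import torch
from transformers import AutoModelForCausalLM, HiggsConfig  # HiggsConfig assumed present in this version

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B-Instruct",      # example checkpoint
    quantization_config=HiggsConfig(bits=4),  # in-flight HIGGS quantization
    torch_dtype=torch.float16,                # FLUTE kernels only accept fp16 / bf16
    device_map="cuda",                        # cpu/disk entries are rejected by validate_environment
)
```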
class AutoQuantizationConfig:
    """
    The Auto-HF quantization config class that takes care of automatically dispatching to the correct
    quantization config given a quantization config stored in a dictionary.
    """

    @classmethod
    def from_dict(cls, quantization_config_dict: Dict):
        quant_method = quantization_config_dict.get("quant_method", None)
        # We need special care for bnb models to make sure everything stays backward-compatible.
        if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
            suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
            quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
        elif quant_method is None:
            raise ValueError(
                "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
            )

        if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING.keys():
            raise ValueError(
                f"Unknown quantization type, got {quant_method} - supported types are:"
                f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
            )

        target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
        return target_cls.from_dict(quantization_config_dict)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        if getattr(model_config, "quantization_config", None) is None:
            raise ValueError(
                f"Did not find a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
            )
        quantization_config_dict = model_config.quantization_config
        quantization_config = cls.from_dict(quantization_config_dict)
        # Update with potential kwargs that are passed through from_pretrained.
        quantization_config.update(**kwargs)
        return quantization_config
class_definition
2,727
4,829
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/auto.py
null
356
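A small usage sketch of the dispatch logic above; the commented-out repo id is hypothetical.

```python
from transformers.quantizers import AutoQuantizationConfig

# Legacy bnb dicts have no `quant_method`, so the load_in_8bit/load_in_4bit flags decide the suffix.
config = AutoQuantizationConfig.from_dict({"load_in_8bit": True})
print(type(config).__name__)  # BitsAndBytesConfig

# Or resolve it directly from a quantized checkpoint on the Hub (hypothetical repo id):
# config = AutoQuantizationConfig.from_pretrained("some-user/opt-350m-8bit")
```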
class AutoHfQuantizer: """ The Auto-HF quantizer class that takes care of automatically instantiating to the correct `HfQuantizer` given the `QuantizationConfig`. """ @classmethod def from_config(cls, quantization_config: Union[QuantizationConfigMixin, Dict], **kwargs): # Convert it to a QuantizationConfig if the q_config is a dict if isinstance(quantization_config, dict): quantization_config = AutoQuantizationConfig.from_dict(quantization_config) quant_method = quantization_config.quant_method # Again, we need a special care for bnb as we have a single quantization config # class for both 4-bit and 8-bit quantization if quant_method == QuantizationMethod.BITS_AND_BYTES: if quantization_config.load_in_8bit: quant_method += "_8bit" else: quant_method += "_4bit" if quant_method not in AUTO_QUANTIZER_MAPPING.keys(): raise ValueError( f"Unknown quantization type, got {quant_method} - supported types are:" f" {list(AUTO_QUANTIZER_MAPPING.keys())}" ) target_cls = AUTO_QUANTIZER_MAPPING[quant_method] return target_cls(quantization_config, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): quantization_config = AutoQuantizationConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls.from_config(quantization_config) @classmethod def merge_quantization_configs( cls, quantization_config: Union[dict, QuantizationConfigMixin], quantization_config_from_args: Optional[QuantizationConfigMixin], ): """ handles situations where both quantization_config from args and quantization_config from model config are present. """ if quantization_config_from_args is not None: warning_msg = ( "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading" " already has a `quantization_config` attribute. The `quantization_config` from the model will be used." ) else: warning_msg = "" if isinstance(quantization_config, dict): quantization_config = AutoQuantizationConfig.from_dict(quantization_config) if ( isinstance(quantization_config, (GPTQConfig, AwqConfig, FbgemmFp8Config, CompressedTensorsConfig)) and quantization_config_from_args is not None ): # special case for GPTQ / AWQ / FbgemmFp8 config collision loading_attr_dict = quantization_config_from_args.get_loading_attributes() for attr, val in loading_attr_dict.items(): setattr(quantization_config, attr, val) warning_msg += f"However, loading attributes (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored." if warning_msg != "": warnings.warn(warning_msg) return quantization_config
class_definition
4,832
8,014
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/auto.py
null
357
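And the corresponding quantizer dispatch, sketched under the assumption that the 8-bit quantizer class from the file shown earlier is named `Bnb8BitHfQuantizer`:

```python
from transformers import BitsAndBytesConfig
from transformers.quantizers import AutoHfQuantizer

# The single BitsAndBytesConfig class is routed to the 4-bit or 8-bit quantizer
# based on its load_in_8bit / load_in_4bit flags.
quantizer = AutoHfQuantizer.from_config(BitsAndBytesConfig(load_in_8bit=True))
print(type(quantizer).__name__)  # e.g. Bnb8BitHfQuantizer
```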
class ImageQuestionAnsweringTool(PipelineTool): default_checkpoint = "dandelin/vilt-b32-finetuned-vqa" description = ( "This is a tool that answers a question about an image. It " "returns a text that is the answer to the question." ) name = "image_qa" pre_processor_class = AutoProcessor model_class = AutoModelForVisualQuestionAnswering inputs = { "image": { "type": "image", "description": "The image containing the information. Can be a PIL Image or a string path to the image.", }, "question": {"type": "string", "description": "The question in English"}, } output_type = "string" def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) super().__init__(*args, **kwargs) def encode(self, image: "Image", question: str): return self.pre_processor(image, question, return_tensors="pt") def forward(self, inputs): with torch.no_grad(): return self.model(**inputs).logits def decode(self, outputs): idx = outputs.argmax(-1).item() return self.model.config.id2label[idx]
class_definition
835
2,003
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/image_question_answering.py
null
358
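A usage sketch for the tool above; the image path is hypothetical and the VQA checkpoint is downloaded on first use.

```python
from PIL import Image

from transformers.agents.image_question_answering import ImageQuestionAnsweringTool

tool = ImageQuestionAnsweringTool()        # uses dandelin/vilt-b32-finetuned-vqa by default
image = Image.open("office_desk.png")      # hypothetical local image
print(tool(image=image, question="How many monitors are on the desk?"))
```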
class Problem:
    """
    A class regrouping all the information to solve a problem on which we will evaluate agents.

    Args:
        task (`str` or `list[str]`):
            One or several descriptions of the task to perform. If a list, it should contain variations on the
            phrasing, but for the same task.
        inputs (`list[str]` or `dict[str, str]`):
            The inputs that will be fed to the tools. For this testing environment, only strings are accepted as
            values. Pass along a dictionary when you want to specify the value of each input, or just the list of
            inputs expected (the value used will be `<<input_name>>` in this case).
        answer (`str` or `list[str]`):
            The theoretical answer (or list of possible valid answers) to the problem, as code.
    """

    def __init__(self, task, inputs, answer):
        self.task = task
        self.inputs = inputs
        self.answer = answer
class_definition
2,931
3,889
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/evaluate_agent.py
null
359
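A toy entry built directly from the docstring above; the `translator` call in `answer` refers to one of the default agent tools and is only illustrative.

```python
problem = Problem(
    task=[
        "Translate the following `text` into Spanish.",
        "Give me the Spanish translation of this `text`.",
    ],
    inputs=["text"],
    answer="translator(text=text, src_lang='English', tgt_lang='Spanish')",
)
```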
class ChatMessage: def __init__(self, role, content, metadata=None): self.role = role self.content = content self.metadata = metadata
class_definition
943
1,152
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/monitoring.py
null
360
class ChatMessage: def __init__(self, role, content, metadata=None): self.role = role self.content = content self.metadata = metadata
class_definition
2,409
2,618
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/monitoring.py
null
361
class Monitor: def __init__(self, tracked_llm_engine): self.step_durations = [] self.tracked_llm_engine = tracked_llm_engine if getattr(self.tracked_llm_engine, "last_input_token_count", "Not found") != "Not found": self.total_input_token_count = 0 self.total_output_token_count = 0 def update_metrics(self, step_log): step_duration = step_log["step_duration"] self.step_durations.append(step_duration) logger.info(f"Step {len(self.step_durations)}:") logger.info(f"- Time taken: {step_duration:.2f} seconds (valid only if step succeeded)") if getattr(self.tracked_llm_engine, "last_input_token_count", None) is not None: self.total_input_token_count += self.tracked_llm_engine.last_input_token_count self.total_output_token_count += self.tracked_llm_engine.last_output_token_count logger.info(f"- Input tokens: {self.total_input_token_count}") logger.info(f"- Output tokens: {self.total_output_token_count}")
class_definition
3,628
4,683
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/monitoring.py
null
362
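A sketch of how the monitor is wired up; agents do this automatically through `step_callbacks`, so calling `update_metrics` by hand is only for illustration. Instantiating the default `HfApiEngine` needs network access to fetch a tokenizer.

```python
from transformers.agents.llm_engine import HfApiEngine
from transformers.agents.monitoring import Monitor

llm_engine = HfApiEngine()                        # exposes last_input_token_count / last_output_token_count
monitor = Monitor(tracked_llm_engine=llm_engine)

# After each agent step, a log dictionary with the step duration is passed in:
monitor.update_metrics({"step_duration": 1.7})
```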
class Tool: """ A base class for the functions used by the agent. Subclass this and implement the `__call__` method as well as the following class attributes: - **description** (`str`) -- A short description of what your tool does, the inputs it expects and the output(s) it will return. For instance 'This is a tool that downloads a file from a `url`. It takes the `url` as input, and returns the text contained in the file'. - **name** (`str`) -- A performative name that will be used for your tool in the prompt to the agent. For instance `"text-classifier"` or `"image_generator"`. - **inputs** (`Dict[str, Dict[str, Union[str, type]]]`) -- The dict of modalities expected for the inputs. It has one `type`key and a `description`key. This is used by `launch_gradio_demo` or to make a nice space from your tool, and also can be used in the generated description for your tool. - **output_type** (`type`) -- The type of the tool output. This is used by `launch_gradio_demo` or to make a nice space from your tool, and also can be used in the generated description for your tool. You can also override the method [`~Tool.setup`] if your tool as an expensive operation to perform before being usable (such as loading a model). [`~Tool.setup`] will be called the first time you use your tool, but not at instantiation. """ name: str description: str inputs: Dict[str, Dict[str, Union[str, type]]] output_type: type def __init__(self, *args, **kwargs): self.is_initialized = False def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) validate_after_init(cls, do_validate_forward=False) def validate_arguments(self, do_validate_forward: bool = True): required_attributes = { "description": str, "name": str, "inputs": dict, "output_type": str, } authorized_types = ["string", "integer", "number", "image", "audio", "any", "boolean"] for attr, expected_type in required_attributes.items(): attr_value = getattr(self, attr, None) if attr_value is None: raise TypeError(f"You must set an attribute {attr}.") if not isinstance(attr_value, expected_type): raise TypeError( f"Attribute {attr} should have type {expected_type.__name__}, got {type(attr_value)} instead." ) for input_name, input_content in self.inputs.items(): assert isinstance(input_content, dict), f"Input '{input_name}' should be a dictionary." assert ( "type" in input_content and "description" in input_content ), f"Input '{input_name}' should have keys 'type' and 'description', has only {list(input_content.keys())}." if input_content["type"] not in authorized_types: raise Exception( f"Input '{input_name}': type '{input_content['type']}' is not an authorized value, should be one of {authorized_types}." ) assert getattr(self, "output_type", None) in authorized_types if do_validate_forward: if not isinstance(self, PipelineTool): signature = inspect.signature(self.forward) if not set(signature.parameters.keys()) == set(self.inputs.keys()): raise Exception( "Tool's 'forward' method should take 'self' as its first argument, then its next arguments should match the keys of tool attribute 'inputs'." ) def forward(self, *args, **kwargs): return NotImplemented("Write this method in your subclass of `Tool`.") def __call__(self, *args, **kwargs): args, kwargs = handle_agent_inputs(*args, **kwargs) outputs = self.forward(*args, **kwargs) return handle_agent_outputs(outputs, self.output_type) def setup(self): """ Overwrite this method here for any operation that is expensive and needs to be executed before you start using your tool. Such as loading a big model. 
""" self.is_initialized = True def save(self, output_dir): """ Saves the relevant code files for your tool so it can be pushed to the Hub. This will copy the code of your tool in `output_dir` as well as autogenerate: - a config file named `tool_config.json` - an `app.py` file so that your tool can be converted to a space - a `requirements.txt` containing the names of the module used by your tool (as detected when inspecting its code) You should only use this method to save tools that are defined in a separate module (not `__main__`). Args: output_dir (`str`): The folder in which you want to save your tool. """ os.makedirs(output_dir, exist_ok=True) # Save module file if self.__module__ == "__main__": raise ValueError( f"We can't save the code defining {self} in {output_dir} as it's been defined in __main__. You " "have to put this code in a separate module so we can include it in the saved folder." ) module_files = custom_object_save(self, output_dir) module_name = self.__class__.__module__ last_module = module_name.split(".")[-1] full_name = f"{last_module}.{self.__class__.__name__}" # Save config file config_file = os.path.join(output_dir, "tool_config.json") if os.path.isfile(config_file): with open(config_file, "r", encoding="utf-8") as f: tool_config = json.load(f) else: tool_config = {} tool_config = { "tool_class": full_name, "description": self.description, "name": self.name, "inputs": self.inputs, "output_type": str(self.output_type), } with open(config_file, "w", encoding="utf-8") as f: f.write(json.dumps(tool_config, indent=2, sort_keys=True) + "\n") # Save app file app_file = os.path.join(output_dir, "app.py") with open(app_file, "w", encoding="utf-8") as f: f.write(APP_FILE_TEMPLATE.format(module_name=last_module, class_name=self.__class__.__name__)) # Save requirements file requirements_file = os.path.join(output_dir, "requirements.txt") imports = [] for module in module_files: imports.extend(get_imports(module)) imports = list(set(imports)) with open(requirements_file, "w", encoding="utf-8") as f: f.write("\n".join(imports) + "\n") @classmethod def from_hub( cls, repo_id: str, token: Optional[str] = None, **kwargs, ): """ Loads a tool defined on the Hub. <Tip warning={true}> Loading a tool from the Hub means that you'll download the tool and execute it locally. ALWAYS inspect the tool you're downloading before loading it within your runtime, as you would do when installing a package using pip/npm/apt. </Tip> Args: repo_id (`str`): The name of the repo on the Hub where your tool is defined. token (`str`, *optional*): The token to identify you on hf.co. If unset, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). kwargs (additional keyword arguments, *optional*): Additional keyword arguments that will be split in two: all arguments relevant to the Hub (such as `cache_dir`, `revision`, `subfolder`) will be used when downloading the files for your tool, and the others will be passed along to its init. """ hub_kwargs_names = [ "cache_dir", "force_download", "resume_download", "proxies", "revision", "repo_type", "subfolder", "local_files_only", ] hub_kwargs = {k: v for k, v in kwargs.items() if k in hub_kwargs_names} # Try to get the tool config first. 
hub_kwargs["repo_type"] = get_repo_type(repo_id, **hub_kwargs) resolved_config_file = cached_file( repo_id, TOOL_CONFIG_FILE, token=token, **hub_kwargs, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, ) is_tool_config = resolved_config_file is not None if resolved_config_file is None: resolved_config_file = cached_file( repo_id, CONFIG_NAME, token=token, **hub_kwargs, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, ) if resolved_config_file is None: raise EnvironmentError( f"{repo_id} does not appear to provide a valid configuration in `tool_config.json` or `config.json`." ) with open(resolved_config_file, encoding="utf-8") as reader: config = json.load(reader) if not is_tool_config: if "custom_tool" not in config: raise EnvironmentError( f"{repo_id} does not provide a mapping to custom tools in its configuration `config.json`." ) custom_tool = config["custom_tool"] else: custom_tool = config tool_class = custom_tool["tool_class"] tool_class = get_class_from_dynamic_module(tool_class, repo_id, token=token, **hub_kwargs) if len(tool_class.name) == 0: tool_class.name = custom_tool["name"] if tool_class.name != custom_tool["name"]: logger.warning( f"{tool_class.__name__} implements a different name in its configuration and class. Using the tool " "configuration name." ) tool_class.name = custom_tool["name"] if len(tool_class.description) == 0: tool_class.description = custom_tool["description"] if tool_class.description != custom_tool["description"]: logger.warning( f"{tool_class.__name__} implements a different description in its configuration and class. Using the " "tool configuration description." ) tool_class.description = custom_tool["description"] if tool_class.inputs != custom_tool["inputs"]: tool_class.inputs = custom_tool["inputs"] if tool_class.output_type != custom_tool["output_type"]: tool_class.output_type = custom_tool["output_type"] if not isinstance(tool_class.inputs, dict): tool_class.inputs = ast.literal_eval(tool_class.inputs) return tool_class(**kwargs) def push_to_hub( self, repo_id: str, commit_message: str = "Upload tool", private: Optional[bool] = None, token: Optional[Union[bool, str]] = None, create_pr: bool = False, ) -> str: """ Upload the tool to the Hub. For this method to work properly, your tool must have been defined in a separate module (not `__main__`). For instance: ``` from my_tool_module import MyTool my_tool = MyTool() my_tool.push_to_hub("my-username/my-space") ``` Parameters: repo_id (`str`): The name of the repository you want to push your tool to. It should contain your organization name when pushing to a given organization. commit_message (`str`, *optional*, defaults to `"Upload tool"`): Message to commit while pushing. private (`bool`, *optional*): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. 
""" repo_url = create_repo( repo_id=repo_id, token=token, private=private, exist_ok=True, repo_type="space", space_sdk="gradio", ) repo_id = repo_url.repo_id metadata_update(repo_id, {"tags": ["tool"]}, repo_type="space") with tempfile.TemporaryDirectory() as work_dir: # Save all files. self.save(work_dir) logger.info(f"Uploading the following files to {repo_id}: {','.join(os.listdir(work_dir))}") return upload_folder( repo_id=repo_id, commit_message=commit_message, folder_path=work_dir, token=token, create_pr=create_pr, repo_type="space", ) @staticmethod def from_space( space_id: str, name: str, description: str, api_name: Optional[str] = None, token: Optional[str] = None ): """ Creates a [`Tool`] from a Space given its id on the Hub. Args: space_id (`str`): The id of the Space on the Hub. name (`str`): The name of the tool. description (`str`): The description of the tool. api_name (`str`, *optional*): The specific api_name to use, if the space has several tabs. If not precised, will default to the first available api. token (`str`, *optional*): Add your token to access private spaces or increase your GPU quotas. Returns: [`Tool`]: The Space, as a tool. Examples: ``` image_generator = Tool.from_space( space_id="black-forest-labs/FLUX.1-schnell", name="image-generator", description="Generate an image from a prompt" ) image = image_generator("Generate an image of a cool surfer in Tahiti") ``` ``` face_swapper = Tool.from_space( "tuan2308/face-swap", "face_swapper", "Tool that puts the face shown on the first image on the second image. You can give it paths to images.", ) image = face_swapper('./aymeric.jpeg', './ruth.jpg') ``` """ from gradio_client import Client, handle_file from gradio_client.utils import is_http_url_like class SpaceToolWrapper(Tool): def __init__( self, space_id: str, name: str, description: str, api_name: Optional[str] = None, token: Optional[str] = None, ): self.client = Client(space_id, hf_token=token) self.name = name self.description = description space_description = self.client.view_api(return_format="dict", print_info=False)["named_endpoints"] # If api_name is not defined, take the first of the available APIs for this space if api_name is None: api_name = list(space_description.keys())[0] logger.warning( f"Since `api_name` was not defined, it was automatically set to the first avilable API: `{api_name}`." 
) self.api_name = api_name try: space_description_api = space_description[api_name] except KeyError: raise KeyError(f"Could not find specified {api_name=} among available api names.") self.inputs = {} for parameter in space_description_api["parameters"]: if not parameter["parameter_has_default"]: parameter_type = parameter["type"]["type"] if parameter_type == "object": parameter_type = "any" self.inputs[parameter["parameter_name"]] = { "type": parameter_type, "description": parameter["python_type"]["description"], } output_component = space_description_api["returns"][0]["component"] if output_component == "Image": self.output_type = "image" elif output_component == "Audio": self.output_type = "audio" else: self.output_type = "any" def sanitize_argument_for_prediction(self, arg): if isinstance(arg, ImageType): temp_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False) arg.save(temp_file.name) arg = temp_file.name if (isinstance(arg, (str, Path)) and Path(arg).exists() and Path(arg).is_file()) or is_http_url_like( arg ): arg = handle_file(arg) return arg def forward(self, *args, **kwargs): # Preprocess args and kwargs: args = list(args) for i, arg in enumerate(args): args[i] = self.sanitize_argument_for_prediction(arg) for arg_name, arg in kwargs.items(): kwargs[arg_name] = self.sanitize_argument_for_prediction(arg) output = self.client.predict(*args, api_name=self.api_name, **kwargs) if isinstance(output, tuple) or isinstance(output, list): return output[ 0 ] # Sometime the space also returns the generation seed, in which case the result is at index 0 return output return SpaceToolWrapper(space_id, name, description, api_name=api_name, token=token) @staticmethod def from_gradio(gradio_tool): """ Creates a [`Tool`] from a gradio tool. """ import inspect class GradioToolWrapper(Tool): def __init__(self, _gradio_tool): self.name = _gradio_tool.name self.description = _gradio_tool.description self.output_type = "string" self._gradio_tool = _gradio_tool func_args = list(inspect.signature(_gradio_tool.run).parameters.items()) self.inputs = { key: {"type": CONVERSION_DICT[value.annotation], "description": ""} for key, value in func_args } self.forward = self._gradio_tool.run return GradioToolWrapper(gradio_tool) @staticmethod def from_langchain(langchain_tool): """ Creates a [`Tool`] from a langchain tool. """ class LangChainToolWrapper(Tool): def __init__(self, _langchain_tool): self.name = _langchain_tool.name.lower() self.description = _langchain_tool.description self.inputs = _langchain_tool.args.copy() for input_content in self.inputs.values(): if "title" in input_content: input_content.pop("title") input_content["description"] = "" self.output_type = "string" self.langchain_tool = _langchain_tool def forward(self, *args, **kwargs): tool_input = kwargs.copy() for index, argument in enumerate(args): if index < len(self.inputs): input_key = next(iter(self.inputs)) tool_input[input_key] = argument return self.langchain_tool.run(tool_input) return LangChainToolWrapper(langchain_tool)
class_definition
3,043
23,689
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/tools.py
null
363
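A minimal custom tool following the contract described in the class docstring above (a `name`, a `description`, an `inputs` dict with `type`/`description` entries, an `output_type`, and a `forward` whose arguments match the input keys).

```python
from transformers.agents.tools import Tool


class WordCountTool(Tool):
    name = "word_counter"
    description = "This is a tool that counts the number of words in a text. It returns the count as a string."
    inputs = {
        "text": {"type": "string", "description": "The text whose words should be counted"},
    }
    output_type = "string"

    def forward(self, text: str):
        return str(len(text.split()))


tool = WordCountTool()
print(tool(text="Agents call tools through a simple schema."))  # "7"
```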
class SpaceToolWrapper(Tool): def __init__( self, space_id: str, name: str, description: str, api_name: Optional[str] = None, token: Optional[str] = None, ): self.client = Client(space_id, hf_token=token) self.name = name self.description = description space_description = self.client.view_api(return_format="dict", print_info=False)["named_endpoints"] # If api_name is not defined, take the first of the available APIs for this space if api_name is None: api_name = list(space_description.keys())[0] logger.warning( f"Since `api_name` was not defined, it was automatically set to the first avilable API: `{api_name}`." ) self.api_name = api_name try: space_description_api = space_description[api_name] except KeyError: raise KeyError(f"Could not find specified {api_name=} among available api names.") self.inputs = {} for parameter in space_description_api["parameters"]: if not parameter["parameter_has_default"]: parameter_type = parameter["type"]["type"] if parameter_type == "object": parameter_type = "any" self.inputs[parameter["parameter_name"]] = { "type": parameter_type, "description": parameter["python_type"]["description"], } output_component = space_description_api["returns"][0]["component"] if output_component == "Image": self.output_type = "image" elif output_component == "Audio": self.output_type = "audio" else: self.output_type = "any" def sanitize_argument_for_prediction(self, arg): if isinstance(arg, ImageType): temp_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False) arg.save(temp_file.name) arg = temp_file.name if (isinstance(arg, (str, Path)) and Path(arg).exists() and Path(arg).is_file()) or is_http_url_like( arg ): arg = handle_file(arg) return arg def forward(self, *args, **kwargs): # Preprocess args and kwargs: args = list(args) for i, arg in enumerate(args): args[i] = self.sanitize_argument_for_prediction(arg) for arg_name, arg in kwargs.items(): kwargs[arg_name] = self.sanitize_argument_for_prediction(arg) output = self.client.predict(*args, api_name=self.api_name, **kwargs) if isinstance(output, tuple) or isinstance(output, list): return output[ 0 ] # Sometime the space also returns the generation seed, in which case the result is at index 0 return output
class_definition
18,293
21,668
1
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/tools.py
Tool
364
class GradioToolWrapper(Tool): def __init__(self, _gradio_tool): self.name = _gradio_tool.name self.description = _gradio_tool.description self.output_type = "string" self._gradio_tool = _gradio_tool func_args = list(inspect.signature(_gradio_tool.run).parameters.items()) self.inputs = { key: {"type": CONVERSION_DICT[value.annotation], "description": ""} for key, value in func_args } self.forward = self._gradio_tool.run
class_definition
21,919
22,502
1
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/tools.py
Tool
365
class LangChainToolWrapper(Tool): def __init__(self, _langchain_tool): self.name = _langchain_tool.name.lower() self.description = _langchain_tool.description self.inputs = _langchain_tool.args.copy() for input_content in self.inputs.values(): if "title" in input_content: input_content.pop("title") input_content["description"] = "" self.output_type = "string" self.langchain_tool = _langchain_tool def forward(self, *args, **kwargs): tool_input = kwargs.copy() for index, argument in enumerate(args): if index < len(self.inputs): input_key = next(iter(self.inputs)) tool_input[input_key] = argument return self.langchain_tool.run(tool_input)
class_definition
22,692
23,636
1
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/tools.py
Tool
366
class PipelineTool(Tool): """ A [`Tool`] tailored towards Transformer models. On top of the class attributes of the base class [`Tool`], you will need to specify: - **model_class** (`type`) -- The class to use to load the model in this tool. - **default_checkpoint** (`str`) -- The default checkpoint that should be used when the user doesn't specify one. - **pre_processor_class** (`type`, *optional*, defaults to [`AutoProcessor`]) -- The class to use to load the pre-processor - **post_processor_class** (`type`, *optional*, defaults to [`AutoProcessor`]) -- The class to use to load the post-processor (when different from the pre-processor). Args: model (`str` or [`PreTrainedModel`], *optional*): The name of the checkpoint to use for the model, or the instantiated model. If unset, will default to the value of the class attribute `default_checkpoint`. pre_processor (`str` or `Any`, *optional*): The name of the checkpoint to use for the pre-processor, or the instantiated pre-processor (can be a tokenizer, an image processor, a feature extractor or a processor). Will default to the value of `model` if unset. post_processor (`str` or `Any`, *optional*): The name of the checkpoint to use for the post-processor, or the instantiated pre-processor (can be a tokenizer, an image processor, a feature extractor or a processor). Will default to the `pre_processor` if unset. device (`int`, `str` or `torch.device`, *optional*): The device on which to execute the model. Will default to any accelerator available (GPU, MPS etc...), the CPU otherwise. device_map (`str` or `dict`, *optional*): If passed along, will be used to instantiate the model. model_kwargs (`dict`, *optional*): Any keyword argument to send to the model instantiation. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). hub_kwargs (additional keyword arguments, *optional*): Any additional keyword argument to send to the methods that will load the data from the Hub. """ pre_processor_class = AutoProcessor model_class = None post_processor_class = AutoProcessor default_checkpoint = None description = "This is a pipeline tool" name = "pipeline" inputs = {"prompt": str} output_type = str def __init__( self, model=None, pre_processor=None, post_processor=None, device=None, device_map=None, model_kwargs=None, token=None, **hub_kwargs, ): if not is_torch_available(): raise ImportError("Please install torch in order to use this tool.") if not is_accelerate_available(): raise ImportError("Please install accelerate in order to use this tool.") if model is None: if self.default_checkpoint is None: raise ValueError("This tool does not implement a default checkpoint, you need to pass one.") model = self.default_checkpoint if pre_processor is None: pre_processor = model self.model = model self.pre_processor = pre_processor self.post_processor = post_processor self.device = device self.device_map = device_map self.model_kwargs = {} if model_kwargs is None else model_kwargs if device_map is not None: self.model_kwargs["device_map"] = device_map self.hub_kwargs = hub_kwargs self.hub_kwargs["token"] = token super().__init__() def setup(self): """ Instantiates the `pre_processor`, `model` and `post_processor` if necessary. 
""" if isinstance(self.pre_processor, str): self.pre_processor = self.pre_processor_class.from_pretrained(self.pre_processor, **self.hub_kwargs) if isinstance(self.model, str): self.model = self.model_class.from_pretrained(self.model, **self.model_kwargs, **self.hub_kwargs) if self.post_processor is None: self.post_processor = self.pre_processor elif isinstance(self.post_processor, str): self.post_processor = self.post_processor_class.from_pretrained(self.post_processor, **self.hub_kwargs) if self.device is None: if self.device_map is not None: self.device = list(self.model.hf_device_map.values())[0] else: self.device = PartialState().default_device if self.device_map is None: self.model.to(self.device) super().setup() def encode(self, raw_inputs): """ Uses the `pre_processor` to prepare the inputs for the `model`. """ return self.pre_processor(raw_inputs) def forward(self, inputs): """ Sends the inputs through the `model`. """ with torch.no_grad(): return self.model(**inputs) def decode(self, outputs): """ Uses the `post_processor` to decode the model output. """ return self.post_processor(outputs) def __call__(self, *args, **kwargs): args, kwargs = handle_agent_inputs(*args, **kwargs) if not self.is_initialized: self.setup() encoded_inputs = self.encode(*args, **kwargs) tensor_inputs = {k: v for k, v in encoded_inputs.items() if isinstance(v, torch.Tensor)} non_tensor_inputs = {k: v for k, v in encoded_inputs.items() if not isinstance(v, torch.Tensor)} encoded_inputs = send_to_device(tensor_inputs, self.device) outputs = self.forward({**encoded_inputs, **non_tensor_inputs}) outputs = send_to_device(outputs, "cpu") decoded_outputs = self.decode(outputs) return handle_agent_outputs(decoded_outputs, self.output_type)
class_definition
24,882
31,025
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/tools.py
null
367
class EndpointClient: def __init__(self, endpoint_url: str, token: Optional[str] = None): self.headers = { **build_hf_headers(token=token), "Content-Type": "application/json", } self.endpoint_url = endpoint_url @staticmethod def encode_image(image): _bytes = io.BytesIO() image.save(_bytes, format="PNG") b64 = base64.b64encode(_bytes.getvalue()) return b64.decode("utf-8") @staticmethod def decode_image(raw_image): if not is_vision_available(): raise ImportError( "This tool returned an image but Pillow is not installed. Please install it (`pip install Pillow`)." ) from PIL import Image b64 = base64.b64decode(raw_image) _bytes = io.BytesIO(b64) return Image.open(_bytes) def __call__( self, inputs: Optional[Union[str, Dict, List[str], List[List[str]]]] = None, params: Optional[Dict] = None, data: Optional[bytes] = None, output_image: bool = False, ) -> Any: # Build payload payload = {} if inputs: payload["inputs"] = inputs if params: payload["parameters"] = params # Make API call response = get_session().post(self.endpoint_url, headers=self.headers, json=payload, data=data) # By default, parse the response for the user. if output_image: return self.decode_image(response.content) else: return response.json()
class_definition
35,377
36,954
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/tools.py
null
368
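A usage sketch for the client above; the endpoint URL and token are placeholders.

```python
from transformers.agents.tools import EndpointClient

client = EndpointClient("https://my-endpoint.endpoints.huggingface.cloud", token="hf_xxx")  # placeholders
result = client(inputs="A camembert is a kind of", params={"max_new_tokens": 20})
print(result)
```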
class ToolCollection: """ Tool collections enable loading all Spaces from a collection in order to be added to the agent's toolbox. > [!NOTE] > Only Spaces will be fetched, so you can feel free to add models and datasets to your collection if you'd > like for this collection to showcase them. Args: collection_slug (str): The collection slug referencing the collection. token (str, *optional*): The authentication token if the collection is private. Example: ```py >>> from transformers import ToolCollection, ReactCodeAgent >>> image_tool_collection = ToolCollection(collection_slug="huggingface-tools/diffusion-tools-6630bb19a942c2306a2cdb6f") >>> agent = ReactCodeAgent(tools=[*image_tool_collection.tools], add_base_tools=True) >>> agent.run("Please draw me a picture of rivers and lakes.") ``` """ def __init__(self, collection_slug: str, token: Optional[str] = None): self._collection = get_collection(collection_slug, token=token) self._hub_repo_ids = {item.item_id for item in self._collection.items if item.item_type == "space"} self.tools = {Tool.from_hub(repo_id) for repo_id in self._hub_repo_ids}
class_definition
36,957
38,198
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/tools.py
null
369
class SpecificTool(Tool): name = parameters["name"] description = parameters["description"] inputs = parameters["parameters"]["properties"] output_type = parameters["return"]["type"] @wraps(tool_function) def forward(self, *args, **kwargs): return tool_function(*args, **kwargs)
class_definition
38,825
39,164
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/tools.py
null
370
class MessageRole(str, Enum): USER = "user" ASSISTANT = "assistant" SYSTEM = "system" TOOL_CALL = "tool-call" TOOL_RESPONSE = "tool-response" @classmethod def roles(cls): return [r.value for r in cls]
class_definition
920
1,157
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/llm_engine.py
null
371
class HfEngine: def __init__(self, model_id: Optional[str] = None): self.last_input_token_count = None self.last_output_token_count = None if model_id is None: model_id = "HuggingFaceTB/SmolLM2-1.7B-Instruct" logger.warning(f"Using default model for token counting: '{model_id}'") try: self.tokenizer = AutoTokenizer.from_pretrained(model_id) except Exception as e: logger.warning(f"Failed to load tokenizer for model {model_id}: {e}. Loading default tokenizer instead.") self.tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct") def get_token_counts(self): return { "input_token_count": self.last_input_token_count, "output_token_count": self.last_output_token_count, } def generate( self, messages: List[Dict[str, str]], stop_sequences: Optional[List[str]] = None, grammar: Optional[str] = None ): raise NotImplementedError def __call__( self, messages: List[Dict[str, str]], stop_sequences: Optional[List[str]] = None, grammar: Optional[str] = None ) -> str: """Process the input messages and return the model's response. This method sends a list of messages to the Hugging Face Inference API, optionally with stop sequences and grammar customization. Parameters: messages (`List[Dict[str, str]]`): A list of message dictionaries to be processed. Each dictionary should have the structure `{"role": "user/system", "content": "message content"}`. stop_sequences (`List[str]`, *optional*): A list of strings that will stop the generation if encountered in the model's output. grammar (`str`, *optional*): The grammar or formatting structure to use in the model's response. Returns: `str`: The text content of the model's response. Example: ```python >>> engine = HfApiEngine( ... model="meta-llama/Meta-Llama-3.1-8B-Instruct", ... token="your_hf_token_here", ... max_tokens=2000 ... ) >>> messages = [{"role": "user", "content": "Explain quantum mechanics in simple terms."}] >>> response = engine(messages, stop_sequences=["END"]) >>> print(response) "Quantum mechanics is the branch of physics that studies..." ``` """ if not isinstance(messages, List): raise ValueError("Messages should be a list of dictionaries with 'role' and 'content' keys.") if stop_sequences is None: stop_sequences = [] response = self.generate(messages, stop_sequences, grammar) self.last_input_token_count = len(self.tokenizer.apply_chat_template(messages, tokenize=True)) self.last_output_token_count = len(self.tokenizer.encode(response)) # Remove stop sequences from LLM output for stop_seq in stop_sequences: if response[-len(stop_seq) :] == stop_seq: response = response[: -len(stop_seq)] return response
class_definition
2,356
5,573
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/llm_engine.py
null
372
class HfApiEngine(HfEngine): """A class to interact with Hugging Face's Inference API for language model interaction. This engine allows you to communicate with Hugging Face's models using the Inference API. It can be used in both serverless mode or with a dedicated endpoint, supporting features like stop sequences and grammar customization. Parameters: model (`str`, *optional*, defaults to `"meta-llama/Meta-Llama-3.1-8B-Instruct"`): The Hugging Face model ID to be used for inference. This can be a path or model identifier from the Hugging Face model hub. token (`str`, *optional*): Token used by the Hugging Face API for authentication. If not provided, the class will use the token stored in the Hugging Face CLI configuration. max_tokens (`int`, *optional*, defaults to 1500): The maximum number of tokens allowed in the output. timeout (`int`, *optional*, defaults to 120): Timeout for the API request, in seconds. Raises: ValueError: If the model name is not provided. """ def __init__( self, model: str = "meta-llama/Meta-Llama-3.1-8B-Instruct", token: Optional[str] = None, max_tokens: Optional[int] = 1500, timeout: Optional[int] = 120, ): super().__init__(model_id=model) self.model = model self.client = InferenceClient(self.model, token=token, timeout=timeout) self.max_tokens = max_tokens def generate( self, messages: List[Dict[str, str]], stop_sequences: Optional[List[str]] = None, grammar: Optional[str] = None ) -> str: # Get clean message list messages = get_clean_message_list(messages, role_conversions=llama_role_conversions) # Send messages to the Hugging Face Inference API if grammar is not None: response = self.client.chat_completion( messages, stop=stop_sequences, max_tokens=self.max_tokens, response_format=grammar ) else: response = self.client.chat_completion(messages, stop=stop_sequences, max_tokens=self.max_tokens) response = response.choices[0].message.content return response
class_definition
5,576
7,837
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/llm_engine.py
null
373
class TransformersEngine(HfEngine): """This engine uses a pre-initialized local text-generation pipeline.""" def __init__(self, pipeline: Pipeline, model_id: Optional[str] = None): super().__init__(model_id) self.pipeline = pipeline def generate( self, messages: List[Dict[str, str]], stop_sequences: Optional[List[str]] = None, grammar: Optional[str] = None, max_length: int = 1500, ) -> str: # Get clean message list messages = get_clean_message_list(messages, role_conversions=llama_role_conversions) # Get LLM output if stop_sequences is not None and len(stop_sequences) > 0: stop_strings = stop_sequences else: stop_strings = None output = self.pipeline( messages, stop_strings=stop_strings, max_length=max_length, tokenizer=self.pipeline.tokenizer, ) response = output[0]["generated_text"][-1]["content"] return response
class_definition
7,840
8,887
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/llm_engine.py
null
374
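A sketch of backing the engine with a local pipeline; the model id matches the default used for token counting above, but any chat-capable text-generation checkpoint should work.

```python
from transformers import pipeline
from transformers.agents.llm_engine import TransformersEngine

pipe = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B-Instruct")
engine = TransformersEngine(pipe, model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct")

messages = [{"role": "user", "content": "What is the capital of France?"}]
print(engine(messages, stop_sequences=["<end_action>"]))
```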
class SpeechToTextTool(PipelineTool): default_checkpoint = "distil-whisper/distil-large-v3" description = "This is a tool that transcribes an audio into text. It returns the transcribed text." name = "transcriber" pre_processor_class = WhisperProcessor model_class = WhisperForConditionalGeneration inputs = {"audio": {"type": "audio", "description": "The audio to transcribe"}} output_type = "string" def encode(self, audio): return self.pre_processor(audio, return_tensors="pt") def forward(self, inputs): return self.model.generate(inputs["input_features"]) def decode(self, outputs): return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
class_definition
763
1,495
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/speech_to_text.py
null
375
class CustomFormatter(logging.Formatter): grey = "\x1b[38;20m" bold_yellow = "\x1b[33;1m" red = "\x1b[31;20m" green = "\x1b[32;20m" bold_green = "\x1b[32;20;1m" bold_red = "\x1b[31;1m" bold_white = "\x1b[37;1m" orange = "\x1b[38;5;214m" bold_orange = "\x1b[38;5;214;1m" reset = "\x1b[0m" format = "%(message)s" FORMATS = { logging.DEBUG: grey + format + reset, logging.INFO: format, logging.WARNING: bold_yellow + format + reset, logging.ERROR: red + format + reset, logging.CRITICAL: bold_red + format + reset, 31: reset + format + reset, 32: green + format + reset, 33: bold_green + format + reset, 34: bold_white + format + reset, 35: orange + format + reset, 36: bold_orange + format + reset, } def format(self, record): log_fmt = self.FORMATS.get(record.levelno) formatter = logging.Formatter(log_fmt) return formatter.format(record)
class_definition
1,827
2,835
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
376
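The formatter plugs into the standard `logging` machinery; the custom integer levels (31-36) are the ones the agents use for colored output.

```python
import logging

from transformers.agents.agents import CustomFormatter

logger = logging.getLogger("my_agent_logger")
handler = logging.StreamHandler()
handler.setFormatter(CustomFormatter())
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

logger.warning("Rendered in bold yellow.")
logger.log(33, "======== New task ========")  # custom level 33 -> bold green
```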
class Toolbox: """ The toolbox contains all tools that the agent can perform operations with, as well as a few methods to manage them. Args: tools (`List[Tool]`): The list of tools to instantiate the toolbox with add_base_tools (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to add the tools available within `transformers` to the toolbox. """ def __init__(self, tools: List[Tool], add_base_tools: bool = False): self._tools = {tool.name: tool for tool in tools} if add_base_tools: self.add_base_tools() self._load_tools_if_needed() def add_base_tools(self, add_python_interpreter: bool = False): global _tools_are_initialized global HUGGINGFACE_DEFAULT_TOOLS if not _tools_are_initialized: HUGGINGFACE_DEFAULT_TOOLS = setup_default_tools(logger) _tools_are_initialized = True for tool in HUGGINGFACE_DEFAULT_TOOLS.values(): if tool.name != "python_interpreter" or add_python_interpreter: self.add_tool(tool) self._load_tools_if_needed() @property def tools(self) -> Dict[str, Tool]: """Get all tools currently in the toolbox""" return self._tools def show_tool_descriptions(self, tool_description_template: str = None) -> str: """ Returns the description of all tools in the toolbox Args: tool_description_template (`str`, *optional*): The template to use to describe the tools. If not provided, the default template will be used. """ return "\n".join( [get_tool_description_with_args(tool, tool_description_template) for tool in self._tools.values()] ) def add_tool(self, tool: Tool): """ Adds a tool to the toolbox Args: tool (`Tool`): The tool to add to the toolbox. """ if tool.name in self._tools: raise KeyError(f"Error: tool '{tool.name}' already exists in the toolbox.") self._tools[tool.name] = tool def remove_tool(self, tool_name: str): """ Removes a tool from the toolbox Args: tool_name (`str`): The tool to remove from the toolbox. """ if tool_name not in self._tools: raise KeyError( f"Error: tool {tool_name} not found in toolbox for removal, should be instead one of {list(self._tools.keys())}." ) del self._tools[tool_name] def update_tool(self, tool: Tool): """ Updates a tool in the toolbox according to its name. Args: tool (`Tool`): The tool to update to the toolbox. """ if tool.name not in self._tools: raise KeyError( f"Error: tool {tool.name} not found in toolbox for update, should be instead one of {list(self._tools.keys())}." ) self._tools[tool.name] = tool def clear_toolbox(self): """Clears the toolbox""" self._tools = {} def _load_tools_if_needed(self): for name, tool in self._tools.items(): if not isinstance(tool, Tool): task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id self._tools[name] = load_tool(task_or_repo_id) def __repr__(self): toolbox_description = "Toolbox contents:\n" for tool in self._tools.values(): toolbox_description += f"\t{tool.name}: {tool.description}\n" return toolbox_description
class_definition
6,369
10,031
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
377
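A minimal usage sketch for the `Toolbox` above, assuming a transformers version that still ships the agents module; the two search tools come from the `search.py` listing further down.

```py
from transformers.agents.agents import Toolbox
from transformers.agents.search import DuckDuckGoSearchTool, VisitWebpageTool

toolbox = Toolbox(tools=[DuckDuckGoSearchTool(), VisitWebpageTool()])

# One formatted line per tool (name, description, arguments).
print(toolbox.show_tool_descriptions())

# The `tools` property exposes the underlying dict, keyed by tool name.
print(list(toolbox.tools.keys()))  # ['web_search', 'visit_webpage']

# add_tool / remove_tool raise KeyError on duplicate or unknown names.
toolbox.remove_tool("visit_webpage")
```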
class AgentError(Exception): """Base class for other agent-related exceptions""" def __init__(self, message): super().__init__(message) self.message = message
class_definition
10,034
10,217
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
378
class AgentParsingError(AgentError): """Exception raised for errors in parsing in the agent""" pass
class_definition
10,220
10,328
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
379
class AgentExecutionError(AgentError): """Exception raised for errors in execution in the agent""" pass
class_definition
10,331
10,443
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
380
class AgentMaxIterationsError(AgentError): """Exception raised when the agent reaches the maximum number of iterations without producing a final answer""" pass
class_definition
10,446
10,562
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
381
class AgentGenerationError(AgentError): """Exception raised for errors in generation in the agent""" pass
class_definition
10,565
10,679
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
382
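All of the exceptions above share `AgentError` as a base class, so code that drives an agent manually can handle every agent-specific failure with a single except clause (the ReAct agents below already do this inside their step loop). A small sketch, with a dummy `llm_engine` so it runs without an API token:

```py
from transformers.agents import ReactJsonAgent
from transformers.agents.agents import AgentError, AgentParsingError

# The dummy engine is never really queried; the agent is only used here to
# call extract_action() directly.
agent = ReactJsonAgent(tools=[], llm_engine=lambda messages, stop_sequences=None, **kw: "noop")

try:
    # Raises AgentParsingError because the split token is missing from the output.
    agent.extract_action(llm_output="no action block here", split_token="Action:")
except AgentParsingError as e:
    print("parsing failed:", e.message)
except AgentError as e:
    print("any other agent-specific failure would land here:", e.message)
```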
class Agent: def __init__( self, tools: Union[List[Tool], Toolbox], llm_engine: Callable = None, system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, additional_args: Dict = {}, max_iterations: int = 6, tool_parser: Optional[Callable] = None, add_base_tools: bool = False, verbose: int = 0, grammar: Optional[Dict[str, str]] = None, managed_agents: Optional[List] = None, step_callbacks: Optional[List[Callable]] = None, monitor_metrics: bool = True, ): if system_prompt is None: system_prompt = DEFAULT_REACT_CODE_SYSTEM_PROMPT if tool_parser is None: tool_parser = parse_json_tool_call self.agent_name = self.__class__.__name__ self.llm_engine = llm_engine self.system_prompt_template = system_prompt self.tool_description_template = ( tool_description_template if tool_description_template else DEFAULT_TOOL_DESCRIPTION_TEMPLATE ) self.additional_args = additional_args self.max_iterations = max_iterations self.logger = logger self.tool_parser = tool_parser self.grammar = grammar self.managed_agents = None if managed_agents is not None: self.managed_agents = {agent.name: agent for agent in managed_agents} if isinstance(tools, Toolbox): self._toolbox = tools if add_base_tools: if not is_torch_available(): raise ImportError("Using the base tools requires torch to be installed.") self._toolbox.add_base_tools(add_python_interpreter=(self.__class__ == ReactJsonAgent)) else: self._toolbox = Toolbox(tools, add_base_tools=add_base_tools) self._toolbox.add_tool(FinalAnswerTool()) self.system_prompt = format_prompt_with_tools( self._toolbox, self.system_prompt_template, self.tool_description_template ) self.system_prompt = format_prompt_with_managed_agents_descriptions(self.system_prompt, self.managed_agents) self.prompt = None self.logs = [] self.task = None if verbose == 0: logger.setLevel(logging.WARNING) elif verbose == 1: logger.setLevel(logging.INFO) elif verbose == 2: logger.setLevel(logging.DEBUG) # Initialize step callbacks self.step_callbacks = step_callbacks if step_callbacks is not None else [] # Initialize Monitor if monitor_metrics is True self.monitor = None if monitor_metrics: self.monitor = Monitor(self.llm_engine) self.step_callbacks.append(self.monitor.update_metrics) @property def toolbox(self) -> Toolbox: """Get the toolbox currently available to the agent""" return self._toolbox def initialize_for_run(self): self.token_count = 0 self.system_prompt = format_prompt_with_tools( self._toolbox, self.system_prompt_template, self.tool_description_template, ) self.system_prompt = format_prompt_with_managed_agents_descriptions(self.system_prompt, self.managed_agents) if hasattr(self, "authorized_imports"): self.system_prompt = format_prompt_with_imports( self.system_prompt, list(set(LIST_SAFE_MODULES) | set(self.authorized_imports)) ) self.logs = [{"system_prompt": self.system_prompt, "task": self.task}] self.logger.log(33, "======== New task ========") self.logger.log(34, self.task) self.logger.debug("System prompt is as follows:") self.logger.debug(self.system_prompt) def write_inner_memory_from_logs(self, summary_mode: Optional[bool] = False) -> List[Dict[str, str]]: """ Reads past llm_outputs, actions, and observations or errors from the logs into a series of messages that can be used as input to the LLM. 
""" prompt_message = {"role": MessageRole.SYSTEM, "content": self.logs[0]["system_prompt"]} task_message = { "role": MessageRole.USER, "content": "Task: " + self.logs[0]["task"], } if summary_mode: memory = [task_message] else: memory = [prompt_message, task_message] for i, step_log in enumerate(self.logs[1:]): if "llm_output" in step_log and not summary_mode: thought_message = {"role": MessageRole.ASSISTANT, "content": step_log["llm_output"].strip()} memory.append(thought_message) if "facts" in step_log: thought_message = { "role": MessageRole.ASSISTANT, "content": "[FACTS LIST]:\n" + step_log["facts"].strip(), } memory.append(thought_message) if "plan" in step_log and not summary_mode: thought_message = {"role": MessageRole.ASSISTANT, "content": "[PLAN]:\n" + step_log["plan"].strip()} memory.append(thought_message) if "tool_call" in step_log and summary_mode: tool_call_message = { "role": MessageRole.ASSISTANT, "content": f"[STEP {i} TOOL CALL]: " + str(step_log["tool_call"]).strip(), } memory.append(tool_call_message) if "task" in step_log: tool_call_message = { "role": MessageRole.USER, "content": "New task:\n" + step_log["task"], } memory.append(tool_call_message) if "error" in step_log or "observation" in step_log: if "error" in step_log: message_content = ( f"[OUTPUT OF STEP {i}] -> Error:\n" + str(step_log["error"]) + "\nNow let's retry: take care not to repeat previous errors! If you have retried several times, try a completely different approach.\n" ) elif "observation" in step_log: message_content = f"[OUTPUT OF STEP {i}] -> Observation:\n{step_log['observation']}" tool_response_message = {"role": MessageRole.TOOL_RESPONSE, "content": message_content} memory.append(tool_response_message) return memory def get_succinct_logs(self): return [{key: value for key, value in log.items() if key != "agent_memory"} for log in self.logs] def extract_action(self, llm_output: str, split_token: str) -> str: """ Parse action from the LLM output Args: llm_output (`str`): Output of the LLM split_token (`str`): Separator for the action. Should match the example in the system prompt. """ try: split = llm_output.split(split_token) rationale, action = ( split[-2], split[-1], ) # NOTE: using indexes starting from the end solves for when you have more than one split_token in the output except Exception as e: self.logger.error(e, exc_info=1) raise AgentParsingError( f"Error: No '{split_token}' token provided in your output.\nYour output:\n{llm_output}\n. Be sure to include an action, prefaced with '{split_token}'!" ) return rationale.strip(), action.strip() def execute_tool_call(self, tool_name: str, arguments: Dict[str, str]) -> Any: """ Execute tool with the provided input and returns the result. This method replaces arguments with the actual values from the state if they refer to state variables. Args: tool_name (`str`): Name of the Tool to execute (should be one from self.toolbox). arguments (Dict[str, str]): Arguments passed to the Tool. """ available_tools = self.toolbox.tools if self.managed_agents is not None: available_tools = {**available_tools, **self.managed_agents} if tool_name not in available_tools: error_msg = f"Error: unknown tool {tool_name}, should be instead one of {list(available_tools.keys())}." 
self.logger.error(error_msg, exc_info=1) raise AgentExecutionError(error_msg) try: if isinstance(arguments, str): observation = available_tools[tool_name](arguments) elif isinstance(arguments, dict): for key, value in arguments.items(): # if the value is the name of a state variable like "image.png", replace it with the actual value if isinstance(value, str) and value in self.state: arguments[key] = self.state[value] observation = available_tools[tool_name](**arguments) else: raise AgentExecutionError( f"Arguments passed to tool should be a dict or string: got a {type(arguments)}." ) return observation except Exception as e: if tool_name in self.toolbox.tools: raise AgentExecutionError( f"Error in tool call execution: {e}\nYou should only use this tool with a correct input.\n" f"As a reminder, this tool's description is the following:\n{get_tool_description_with_args(available_tools[tool_name])}" ) elif tool_name in self.managed_agents: raise AgentExecutionError( f"Error in calling team member: {e}\nYou should only ask this team member with a correct request.\n" f"As a reminder, this team member's description is the following:\n{available_tools[tool_name]}" ) def log_rationale_code_action(self, rationale: str, code_action: str) -> None: self.logger.warning("=== Agent thoughts:") self.logger.log(31, rationale) self.logger.warning(">>> Agent is executing the code below:") if is_pygments_available(): self.logger.log( 31, highlight(code_action, PythonLexer(ensurenl=False), Terminal256Formatter(style="nord")) ) else: self.logger.log(31, code_action) self.logger.warning("====") def run(self, **kwargs): """To be implemented in the child class""" raise NotImplementedError
class_definition
12,433
23,079
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
383
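The base `Agent` never calls a model directly: everything goes through the `llm_engine` callable, which, as used above, receives a list of `{"role", "content"}` message dicts plus optional `stop_sequences` and `grammar` keyword arguments and returns a string. A hedged sketch of a custom engine (the canned reply is purely illustrative):

```py
from typing import Dict, List, Optional

from transformers.agents import ReactCodeAgent


def my_llm_engine(
    messages: List[Dict[str, str]],
    stop_sequences: Optional[List[str]] = None,
    grammar: Optional[Dict[str, str]] = None,
) -> str:
    # messages[0] is the system prompt, messages[1] the task, then one entry per
    # past step (see write_inner_memory_from_logs above). A real engine would
    # forward these to a model; this one returns a canned final answer.
    return (
        "Thought: I will answer directly.\n"
        "Code:\n```py\nfinal_answer('hello from a custom engine')\n```<end_action>"
    )


# monitor_metrics is disabled because the dummy engine reports no token counts.
agent = ReactCodeAgent(tools=[], llm_engine=my_llm_engine, monitor_metrics=False)
print(agent.run("Say hello"))  # -> 'hello from a custom engine'
```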
class CodeAgent(Agent): """ A class for an agent that solves the given task using a single block of code. It plans all its actions, then executes all in one shot. """ def __init__( self, tools: List[Tool], llm_engine: Optional[Callable] = None, system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, grammar: Optional[Dict[str, str]] = None, additional_authorized_imports: Optional[List[str]] = None, **kwargs, ): if llm_engine is None: llm_engine = HfApiEngine() if system_prompt is None: system_prompt = DEFAULT_CODE_SYSTEM_PROMPT if tool_description_template is None: tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, **kwargs, ) if not is_pygments_available(): transformers_logging.warning_once( logger, "pygments isn't installed. Installing pygments will enable color syntax highlighting in the " "CodeAgent.", ) self.python_evaluator = evaluate_python_code self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else [] self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports)) self.system_prompt = self.system_prompt.replace("<<authorized_imports>>", str(self.authorized_imports)) def parse_code_blob(self, result: str) -> str: """ Override this method if you want to change the way the code is cleaned in the `run` method. """ return parse_code_blob(result) def run(self, task: str, return_generated_code: bool = False, **kwargs): """ Runs the agent for the given task. Args: task (`str`): The task to perform return_generated_code (`bool`, *optional*, defaults to `False`): Whether to return the generated code instead of running it kwargs (additional keyword arguments, *optional*): Any keyword argument to send to the agent when evaluating the code. Example: ```py from transformers.agents import CodeAgent agent = CodeAgent(tools=[]) agent.run("What is the result of 2 power 3.7384?") ``` """ self.task = task if len(kwargs) > 0: self.task += f"\nYou have been provided with these initial arguments: {str(kwargs)}." self.state = kwargs.copy() self.initialize_for_run() # Run LLM prompt_message = {"role": MessageRole.SYSTEM, "content": self.system_prompt} task_message = { "role": MessageRole.USER, "content": "Task: " + self.task, } self.prompt = [prompt_message, task_message] self.logger.info("====Executing with this prompt====") self.logger.info(self.prompt) additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine(self.prompt, stop_sequences=["<end_action>"], **additional_args) if return_generated_code: return llm_output # Parse try: rationale, code_action = self.extract_action(llm_output=llm_output, split_token="Code:") except Exception as e: self.logger.debug( f"Error in extracting action, trying to parse the whole output as code. Error trace: {e}" ) rationale, code_action = "", llm_output try: code_action = self.parse_code_blob(code_action) except Exception as e: error_msg = f"Error in code parsing: {e}. 
Be sure to provide correct code" self.logger.error(error_msg, exc_info=1) return error_msg # Execute self.log_rationale_code_action(rationale, code_action) try: available_tools = {**BASE_PYTHON_TOOLS.copy(), **self.toolbox.tools} output = self.python_evaluator( code_action, static_tools=available_tools, custom_tools={}, state=self.state, authorized_imports=self.authorized_imports, ) self.logger.info(self.state["print_outputs"]) return output except Exception as e: error_msg = f"Error in execution: {e}. Be sure to provide correct code." self.logger.error(error_msg, exc_info=1) return error_msg
class_definition
23,082
27,909
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
384
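A usage sketch for `CodeAgent`: with `return_generated_code=True` the agent returns the raw LLM output instead of executing it, which is convenient for inspecting the planned code. The fake engine keeps the example offline; with the default `HfApiEngine` you would need a Hugging Face API token.

```py
from transformers.agents import CodeAgent


def fake_engine(messages, stop_sequences=None, **kwargs):
    return (
        "I will just compute the power and return it.\n"
        "Code:\n```py\nfinal_answer(2 ** 3.7384)\n```<end_action>"
    )


agent = CodeAgent(tools=[], llm_engine=fake_engine)
code = agent.run("What is the result of 2 power 3.7384?", return_generated_code=True)
print(code)  # the generated blob is returned without being executed
```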
class ReactAgent(Agent): """ This agent that solves the given task step by step, using the ReAct framework: While the objective is not reached, the agent will perform a cycle of thinking and acting. The action will be parsed from the LLM output: it consists in calls to tools from the toolbox, with arguments chosen by the LLM engine. """ def __init__( self, tools: List[Tool], llm_engine: Optional[Callable] = None, system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, grammar: Optional[Dict[str, str]] = None, plan_type: Optional[str] = None, planning_interval: Optional[int] = None, **kwargs, ): if llm_engine is None: llm_engine = HfApiEngine() if system_prompt is None: system_prompt = DEFAULT_REACT_CODE_SYSTEM_PROMPT if tool_description_template is None: tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE if plan_type is None: plan_type = SUPPORTED_PLAN_TYPES[0] else: assert plan_type in SUPPORTED_PLAN_TYPES, f"plan type {plan_type} is not supported" super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, **kwargs, ) self.planning_interval = planning_interval self.plan_type = plan_type def provide_final_answer(self, task) -> str: """ This method provides a final answer to the task, based on the logs of the agent's interactions. """ self.prompt = [ { "role": MessageRole.SYSTEM, "content": "An agent tried to answer an user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:", } ] self.prompt += self.write_inner_memory_from_logs()[1:] self.prompt += [ { "role": MessageRole.USER, "content": f"Based on the above, please provide an answer to the following user request:\n{task}", } ] try: return self.llm_engine(self.prompt) except Exception as e: return f"Error in generating final llm output: {e}." def run(self, task: str, stream: bool = False, reset: bool = True, **kwargs): """ Runs the agent for the given task. Args: task (`str`): The task to perform Example: ```py from transformers.agents import ReactCodeAgent agent = ReactCodeAgent(tools=[]) agent.run("What is the result of 2 power 3.7384?") ``` """ self.task = task if len(kwargs) > 0: self.task += f"\nYou have been provided with these initial arguments: {str(kwargs)}." self.state = kwargs.copy() if reset: self.initialize_for_run() else: self.logs.append({"task": task}) if stream: return self.stream_run(task) else: return self.direct_run(task) def stream_run(self, task: str): """ Runs the agent in streaming mode, yielding steps as they are executed: should be launched only in the `run` method. """ final_answer = None iteration = 0 while final_answer is None and iteration < self.max_iterations: step_start_time = time.time() step_log_entry = {"iteration": iteration, "start_time": step_start_time} try: self.step(step_log_entry) if "final_answer" in step_log_entry: final_answer = step_log_entry["final_answer"] except AgentError as e: self.logger.error(e, exc_info=1) step_log_entry["error"] = e finally: step_end_time = time.time() step_log_entry["step_end_time"] = step_end_time step_log_entry["step_duration"] = step_end_time - step_start_time self.logs.append(step_log_entry) for callback in self.step_callbacks: callback(step_log_entry) iteration += 1 yield step_log_entry if final_answer is None and iteration == self.max_iterations: error_message = "Reached max iterations." 
final_step_log = {"error": AgentMaxIterationsError(error_message)} self.logs.append(final_step_log) self.logger.error(error_message, exc_info=1) final_answer = self.provide_final_answer(task) final_step_log["final_answer"] = final_answer final_step_log["step_duration"] = 0 for callback in self.step_callbacks: callback(final_step_log) yield final_step_log yield final_answer def direct_run(self, task: str): """ Runs the agent in direct mode, returning outputs only at the end: should be launched only in the `run` method. """ final_answer = None iteration = 0 while final_answer is None and iteration < self.max_iterations: step_start_time = time.time() step_log_entry = {"iteration": iteration, "start_time": step_start_time} try: if self.planning_interval is not None and iteration % self.planning_interval == 0: self.planning_step(task, is_first_step=(iteration == 0), iteration=iteration) self.step(step_log_entry) if "final_answer" in step_log_entry: final_answer = step_log_entry["final_answer"] except AgentError as e: self.logger.error(e, exc_info=1) step_log_entry["error"] = e finally: step_end_time = time.time() step_log_entry["step_end_time"] = step_end_time step_log_entry["step_duration"] = step_end_time - step_start_time self.logs.append(step_log_entry) for callback in self.step_callbacks: callback(step_log_entry) iteration += 1 if final_answer is None and iteration == self.max_iterations: error_message = "Reached max iterations." final_step_log = {"error": AgentMaxIterationsError(error_message)} self.logs.append(final_step_log) self.logger.error(error_message, exc_info=1) final_answer = self.provide_final_answer(task) final_step_log["final_answer"] = final_answer final_step_log["step_duration"] = 0 for callback in self.step_callbacks: callback(final_step_log) return final_answer def planning_step(self, task, is_first_step: bool = False, iteration: int = None): """ Used periodically by the agent to plan the next steps to reach the objective. Args: task (`str`): The task to perform is_first_step (`bool`): If this step is not the first one, the plan should be an update over a previous plan. iteration (`int`): The number of the current step, used as an indication for the LLM. 
""" if is_first_step: message_prompt_facts = {"role": MessageRole.SYSTEM, "content": SYSTEM_PROMPT_FACTS} message_prompt_task = { "role": MessageRole.USER, "content": f"""Here is the task: ``` {task} ``` Now begin!""", } answer_facts = self.llm_engine([message_prompt_facts, message_prompt_task]) message_system_prompt_plan = { "role": MessageRole.SYSTEM, "content": PROMPTS_FOR_INITIAL_PLAN[self.plan_type]["system"], } message_user_prompt_plan = { "role": MessageRole.USER, "content": PROMPTS_FOR_INITIAL_PLAN[self.plan_type]["user"].format( task=task, tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template), managed_agents_descriptions=( show_agents_descriptions(self.managed_agents) if self.managed_agents is not None else "" ), answer_facts=answer_facts, ), } answer_plan = self.llm_engine( [message_system_prompt_plan, message_user_prompt_plan], stop_sequences=["<end_plan>"] ) final_plan_redaction = f"""Here is the plan of action that I will follow to solve the task: ``` {answer_plan} ```""" final_facts_redaction = f"""Here are the facts that I know so far: ``` {answer_facts} ```""".strip() self.logs.append({"plan": final_plan_redaction, "facts": final_facts_redaction}) self.logger.log(36, "===== Initial plan =====") self.logger.log(35, final_plan_redaction) else: # update plan agent_memory = self.write_inner_memory_from_logs( summary_mode=False ) # This will not log the plan but will log facts # Redact updated facts facts_update_system_prompt = { "role": MessageRole.SYSTEM, "content": SYSTEM_PROMPT_FACTS_UPDATE, } facts_update_message = { "role": MessageRole.USER, "content": USER_PROMPT_FACTS_UPDATE, } facts_update = self.llm_engine([facts_update_system_prompt] + agent_memory + [facts_update_message]) # Redact updated plan plan_update_message = { "role": MessageRole.SYSTEM, "content": PROMPTS_FOR_PLAN_UPDATE[self.plan_type]["system"].format(task=task), } plan_update_message_user = { "role": MessageRole.USER, "content": PROMPTS_FOR_PLAN_UPDATE[self.plan_type]["user"].format( task=task, tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template), managed_agents_descriptions=( show_agents_descriptions(self.managed_agents) if self.managed_agents is not None else "" ), facts_update=facts_update, remaining_steps=(self.max_iterations - iteration), ), } plan_update = self.llm_engine( [plan_update_message] + agent_memory + [plan_update_message_user], stop_sequences=["<end_plan>"] ) # Log final facts and plan final_plan_redaction = PLAN_UPDATE_FINAL_PLAN_REDACTION.format(task=task, plan_update=plan_update) final_facts_redaction = f"""Here is the updated list of the facts that I know: ``` {facts_update} ```""" self.logs.append({"plan": final_plan_redaction, "facts": final_facts_redaction}) self.logger.log(36, "===== Updated plan =====") self.logger.log(35, final_plan_redaction)
class_definition
27,912
39,268
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
385
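A sketch of the ReAct run loop from the caller's side: `stream=True` yields one log dict per step, and `step_callbacks` receive the same dicts after each step. `ReactCodeAgent` is used because `ReactAgent` itself does not implement `step()`; the engine below is a placeholder.

```py
from transformers.agents import ReactCodeAgent


def fake_engine(messages, stop_sequences=None, **kwargs):
    # Always answers immediately; a real engine would reason over `messages`.
    return "Thought: done.\nCode:\n```py\nfinal_answer('42')\n```<end_action>"


def log_duration(step_log):
    # step_callbacks receive the same dicts that stream_run yields.
    if "step_duration" in step_log:
        print(f"step {step_log.get('iteration')} took {step_log['step_duration']:.3f}s")


agent = ReactCodeAgent(
    tools=[],
    llm_engine=fake_engine,
    step_callbacks=[log_duration],
    monitor_metrics=False,  # the dummy engine reports no token counts
)

for step_log in agent.run("Give me the answer", stream=True):
    if isinstance(step_log, dict):  # the generator may also yield the final answer itself
        print(step_log.get("observation") or step_log.get("final_answer"))
```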
class ReactJsonAgent(ReactAgent): """ This agent that solves the given task step by step, using the ReAct framework: While the objective is not reached, the agent will perform a cycle of thinking and acting. The tool calls will be formulated by the LLM in JSON format, then parsed and executed. """ def __init__( self, tools: List[Tool], llm_engine: Optional[Callable] = None, system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, grammar: Optional[Dict[str, str]] = None, planning_interval: Optional[int] = None, **kwargs, ): if llm_engine is None: llm_engine = HfApiEngine() if system_prompt is None: system_prompt = DEFAULT_REACT_JSON_SYSTEM_PROMPT if tool_description_template is None: tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, planning_interval=planning_interval, **kwargs, ) def step(self, log_entry: Dict[str, Any]): """ Perform one step in the ReAct framework: the agent thinks, acts, and observes the result. The errors are raised here, they are caught and logged in the run() method. """ agent_memory = self.write_inner_memory_from_logs() self.prompt = agent_memory self.logger.debug("===== New step =====") # Add new step in logs log_entry["agent_memory"] = agent_memory.copy() self.logger.info("===== Calling LLM with this last message: =====") self.logger.info(self.prompt[-1]) try: additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine( self.prompt, stop_sequences=["<end_action>", "Observation:"], **additional_args ) except Exception as e: raise AgentGenerationError(f"Error in generating llm output: {e}.") self.logger.debug("===== Output message of the LLM: =====") self.logger.debug(llm_output) log_entry["llm_output"] = llm_output # Parse self.logger.debug("===== Extracting action =====") rationale, action = self.extract_action(llm_output=llm_output, split_token="Action:") try: tool_name, arguments = self.tool_parser(action) except Exception as e: raise AgentParsingError(f"Could not parse the given action: {e}.") log_entry["rationale"] = rationale log_entry["tool_call"] = {"tool_name": tool_name, "tool_arguments": arguments} # Execute self.logger.warning("=== Agent thoughts:") self.logger.log(31, rationale) self.logger.warning(f">>> Calling tool: '{tool_name}' with arguments: {arguments}") if tool_name == "final_answer": if isinstance(arguments, dict): if "answer" in arguments: answer = arguments["answer"] if ( isinstance(answer, str) and answer in self.state.keys() ): # if the answer is a state variable, return the value answer = self.state[answer] else: answer = arguments else: answer = arguments log_entry["final_answer"] = answer return answer else: if arguments is None: arguments = {} observation = self.execute_tool_call(tool_name, arguments) observation_type = type(observation) if observation_type in [AgentImage, AgentAudio]: if observation_type == AgentImage: observation_name = "image.png" elif observation_type == AgentAudio: observation_name = "audio.mp3" # TODO: observation naming could allow for different names of same type self.state[observation_name] = observation updated_information = f"Stored '{observation_name}' in memory." else: updated_information = str(observation).strip() self.logger.info(updated_information) log_entry["observation"] = updated_information return log_entry
class_definition
39,271
43,831
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
386
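A sketch of the action format `ReactJsonAgent` expects from its LLM, based on the default `parse_json_tool_call` parser: a free-form thought, then `Action:` followed by a JSON blob with `action` and `action_input` keys (key names assumed from the default ReAct JSON system prompt of this version), optionally terminated by `<end_action>`.

```py
from transformers.agents import ReactJsonAgent


def fake_engine(messages, stop_sequences=None, **kwargs):
    # Hard-coded reply so the example runs offline; a real engine would answer
    # in this same "Thought / Action / JSON blob" shape.
    return (
        "Thought: I already know the answer.\n"
        "Action:\n"
        '{"action": "final_answer", "action_input": {"answer": "Paris"}}'
        "\n<end_action>"
    )


agent = ReactJsonAgent(tools=[], llm_engine=fake_engine, monitor_metrics=False)
print(agent.run("What is the capital of France?"))  # -> "Paris"
```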
class ReactCodeAgent(ReactAgent): """ This agent that solves the given task step by step, using the ReAct framework: While the objective is not reached, the agent will perform a cycle of thinking and acting. The tool calls will be formulated by the LLM in code format, then parsed and executed. """ def __init__( self, tools: List[Tool], llm_engine: Optional[Callable] = None, system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, grammar: Optional[Dict[str, str]] = None, additional_authorized_imports: Optional[List[str]] = None, planning_interval: Optional[int] = None, **kwargs, ): if llm_engine is None: llm_engine = HfApiEngine() if system_prompt is None: system_prompt = DEFAULT_REACT_CODE_SYSTEM_PROMPT if tool_description_template is None: tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, planning_interval=planning_interval, **kwargs, ) if not is_pygments_available(): transformers_logging.warning_once( logger, "pygments isn't installed. Installing pygments will enable color syntax highlighting in the " "ReactCodeAgent.", ) self.python_evaluator = evaluate_python_code self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else [] self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports)) self.system_prompt = self.system_prompt.replace("<<authorized_imports>>", str(self.authorized_imports)) self.custom_tools = {} def step(self, log_entry: Dict[str, Any]): """ Perform one step in the ReAct framework: the agent thinks, acts, and observes the result. The errors are raised here, they are caught and logged in the run() method. """ agent_memory = self.write_inner_memory_from_logs() self.prompt = agent_memory.copy() self.logger.debug("===== New step =====") # Add new step in logs log_entry["agent_memory"] = agent_memory.copy() self.logger.info("===== Calling LLM with these last messages: =====") self.logger.info(self.prompt[-2:]) try: additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine( self.prompt, stop_sequences=["<end_action>", "Observation:"], **additional_args ) except Exception as e: raise AgentGenerationError(f"Error in generating llm output: {e}.") self.logger.debug("=== Output message of the LLM:") self.logger.debug(llm_output) log_entry["llm_output"] = llm_output # Parse self.logger.debug("=== Extracting action ===") try: rationale, raw_code_action = self.extract_action(llm_output=llm_output, split_token="Code:") except Exception as e: self.logger.debug(f"Error in extracting action, trying to parse the whole output. Error trace: {e}") rationale, raw_code_action = llm_output, llm_output try: code_action = parse_code_blob(raw_code_action) except Exception as e: error_msg = f"Error in code parsing: {e}. 
Make sure to provide correct code" raise AgentParsingError(error_msg) log_entry["rationale"] = rationale log_entry["tool_call"] = {"tool_name": "code interpreter", "tool_arguments": code_action} # Execute self.log_rationale_code_action(rationale, code_action) try: static_tools = { **BASE_PYTHON_TOOLS.copy(), **self.toolbox.tools, } if self.managed_agents is not None: static_tools = {**static_tools, **self.managed_agents} result = self.python_evaluator( code_action, static_tools=static_tools, custom_tools=self.custom_tools, state=self.state, authorized_imports=self.authorized_imports, ) self.logger.warning("Print outputs:") self.logger.log(32, self.state["print_outputs"]) observation = "Print outputs:\n" + self.state["print_outputs"] if result is not None: self.logger.warning("Last output from code snippet:") self.logger.log(32, str(result)) observation += "Last output from code snippet:\n" + str(result)[:100000] log_entry["observation"] = observation except Exception as e: error_msg = f"Code execution failed due to the following error:\n{str(e)}" if "'dict' object has no attribute 'read'" in str(e): error_msg += "\nYou get this error because you passed a dict as input for one of the arguments instead of a string." raise AgentExecutionError(error_msg) for line in code_action.split("\n"): if line[: len("final_answer")] == "final_answer": self.logger.log(33, "Final answer:") self.logger.log(32, result) log_entry["final_answer"] = result return result
class_definition
43,834
49,471
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
387
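A sketch showing `additional_authorized_imports`, which widens the module allow-list that replaces `<<authorized_imports>>` in the system prompt and is enforced by `evaluate_python_code`. The fake engine returns a fixed code blob so the example runs offline.

```py
from transformers.agents import ReactCodeAgent


def fake_engine(messages, stop_sequences=None, **kwargs):
    return (
        "Thought: the standard library is enough here.\n"
        "Code:\n```py\nimport math\nfinal_answer(math.sqrt(2))\n```<end_action>"
    )


agent = ReactCodeAgent(
    tools=[],
    llm_engine=fake_engine,
    additional_authorized_imports=["numpy", "pandas"],  # added on top of LIST_SAFE_MODULES
    monitor_metrics=False,
)
print(agent.run("Compute the square root of 2"))  # -> 1.4142135623730951
```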
class ManagedAgent: def __init__(self, agent, name, description, additional_prompting=None, provide_run_summary=False): self.agent = agent self.name = name self.description = description self.additional_prompting = additional_prompting self.provide_run_summary = provide_run_summary def write_full_task(self, task): full_task = f"""You're a helpful agent named '{self.name}'. You have been submitted this task by your manager. --- Task: {task} --- You're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible so that they have a clear understanding of the answer. Your final_answer WILL HAVE to contain these parts: ### 1. Task outcome (short version): ### 2. Task outcome (extremely detailed version): ### 3. Additional context (if relevant): Put all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost. And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback. <<additional_prompting>>""" if self.additional_prompting: full_task = full_task.replace("\n<<additional_prompting>>", self.additional_prompting).strip() else: full_task = full_task.replace("\n<<additional_prompting>>", "").strip() return full_task def __call__(self, request, **kwargs): full_task = self.write_full_task(request) output = self.agent.run(full_task, **kwargs) if self.provide_run_summary: answer = f"Here is the final answer from your managed agent '{self.name}':\n" answer += str(output) answer += f"\n\nFor more detail, find below a summary of this agent's work:\nSUMMARY OF WORK FROM AGENT '{self.name}':\n" for message in self.agent.write_inner_memory_from_logs(summary_mode=True): content = message["content"] if len(str(content)) < LENGTH_TRUNCATE_REPORTS or "[FACTS LIST]" in str(content): answer += "\n" + str(content) + "\n---" else: answer += ( "\n" + str(content)[:LENGTH_TRUNCATE_REPORTS] + "\n(...Step was truncated because too long)...\n---" ) answer += f"\nEND OF SUMMARY OF WORK FROM AGENT '{self.name}'." return answer else: return output
class_definition
49,507
52,065
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/agents.py
null
388
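A sketch of multi-agent orchestration with `ManagedAgent`: a web-search agent is wrapped and handed to a manager through `managed_agents`, which makes it callable like a tool under its `name`. This variant uses real `HfApiEngine` instances, so it needs a Hugging Face API token and network access; the task string is only an example.

```py
from transformers.agents import HfApiEngine, ReactCodeAgent, ReactJsonAgent
from transformers.agents.agents import ManagedAgent
from transformers.agents.search import DuckDuckGoSearchTool

llm_engine = HfApiEngine()  # default engine; requires an HF API token

web_agent = ReactJsonAgent(tools=[DuckDuckGoSearchTool()], llm_engine=llm_engine)
managed_web_agent = ManagedAgent(
    agent=web_agent,
    name="web_search_agent",
    description="Runs web searches for you. Give it your query as an argument.",
    provide_run_summary=True,  # append a truncated summary of the sub-agent's work
)

manager = ReactCodeAgent(tools=[], llm_engine=llm_engine, managed_agents=[managed_web_agent])
manager.run("Who is the current CEO of Hugging Face?")
```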
class TextToSpeechTool(PipelineTool): default_checkpoint = "microsoft/speecht5_tts" description = ( "This is a tool that reads an English text out loud. It returns a waveform object containing the sound." ) name = "text_to_speech" pre_processor_class = SpeechT5Processor model_class = SpeechT5ForTextToSpeech post_processor_class = SpeechT5HifiGan inputs = {"text": {"type": "string", "description": "The text to read out loud (in English)"}} output_type = "audio" def setup(self): if self.post_processor is None: self.post_processor = "microsoft/speecht5_hifigan" super().setup() def encode(self, text, speaker_embeddings=None): inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError("Datasets needs to be installed if not passing speaker embeddings.") embeddings_dataset = load_dataset( "Matthijs/cmu-arctic-xvectors", split="validation", trust_remote_code=True ) speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def forward(self, inputs): with torch.no_grad(): return self.model.generate_speech(**inputs) def decode(self, outputs): with torch.no_grad(): return self.post_processor(outputs).cpu().detach()
class_definition
898
2,467
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/text_to_speech.py
null
389
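A direct-call sketch for the tool above; on first use it downloads the SpeechT5 checkpoint, the HiFiGAN vocoder and default speaker embeddings, so it needs `torch`, `datasets` and network access.

```py
from transformers.agents.text_to_speech import TextToSpeechTool

tts = TextToSpeechTool()
audio = tts("Hello, this sentence was generated by an agent tool.")
# `audio` wraps a 1-D waveform tensor (output_type "audio"); it can be handed to
# other tools or converted/saved with standard audio libraries.
```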
class DocumentQuestionAnsweringTool(PipelineTool): default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa" description = "This is a tool that answers a question about an document (pdf). It returns a string that contains the answer to the question." name = "document_qa" pre_processor_class = AutoProcessor model_class = VisionEncoderDecoderModel inputs = { "document": { "type": "image", "description": "The image containing the information. Can be a PIL Image or a string path to the image.", }, "question": {"type": "string", "description": "The question in English"}, } output_type = "string" def __init__(self, *args, **kwargs): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*args, **kwargs) def encode(self, document: "Image", question: str): task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" prompt = task_prompt.replace("{user_input}", question) decoder_input_ids = self.pre_processor.tokenizer( prompt, add_special_tokens=False, return_tensors="pt" ).input_ids if isinstance(document, str): img = Image.open(document).convert("RGB") img_array = np.array(img).transpose(2, 0, 1) document = torch.from_numpy(img_array) pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def forward(self, inputs): return self.model.generate( inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences def decode(self, outputs): sequence = self.pre_processor.batch_decode(outputs)[0] sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "") sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token sequence = self.pre_processor.token2json(sequence) return sequence["answer"]
class_definition
931
3,633
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/document_question_answering.py
null
390
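A direct-call sketch: the `document` input can be a PIL image or a path string, as handled in `encode()` above. The file name below is a placeholder, not a bundled asset.

```py
from transformers.agents.document_question_answering import DocumentQuestionAnsweringTool

doc_qa = DocumentQuestionAnsweringTool()
answer = doc_qa(document="invoice.png", question="What is the total amount due?")
print(answer)  # a plain-string answer decoded from the Donut model's output
```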
class TranslationTool(PipelineTool): """ Example: ```py from transformers.agents import TranslationTool translator = TranslationTool() translator("This is a super nice API!", src_lang="English", tgt_lang="French") ``` """ lang_to_code = LANGUAGE_CODES default_checkpoint = "facebook/nllb-200-distilled-600M" description = ( "This is a tool that translates text from one language to another." f" Both `src_lang` and `tgt_lang` should belong to this list of languages: {list(lang_to_code.keys())}." ) name = "translator" pre_processor_class = AutoTokenizer model_class = AutoModelForSeq2SeqLM inputs = { "text": {"type": "string", "description": "The text to translate"}, "src_lang": { "type": "string", "description": "The language of the text to translate. Written in plain English, such as 'Romanian', or 'Albanian'", }, "tgt_lang": { "type": "string", "description": "The language for the desired output. Written in plain English, such as 'Romanian', or 'Albanian'", }, } output_type = "string" def encode(self, text, src_lang, tgt_lang): if src_lang not in self.lang_to_code: raise ValueError(f"{src_lang} is not a supported language.") if tgt_lang not in self.lang_to_code: raise ValueError(f"{tgt_lang} is not a supported language.") src_lang = self.lang_to_code[src_lang] tgt_lang = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang ) def forward(self, inputs): return self.model.generate(**inputs) def decode(self, outputs): return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
class_definition
6,764
8,670
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/translation.py
null
391
class PreTool: name: str inputs: Dict[str, str] output_type: type task: str description: str repo_id: str
class_definition
2,141
2,270
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/default_tools.py
null
392
class PythonInterpreterTool(Tool): name = "python_interpreter" description = "This is a tool that evaluates python code. It can be used to perform calculations." output_type = "string" def __init__(self, *args, authorized_imports=None, **kwargs): if authorized_imports is None: self.authorized_imports = list(set(LIST_SAFE_MODULES)) else: self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(authorized_imports)) self.inputs = { "code": { "type": "string", "description": ( "The code snippet to evaluate. All variables used in this snippet must be defined in this same snippet, " f"else you will get an error. This code can only import the following python libraries: {authorized_imports}." ), } } super().__init__(*args, **kwargs) def forward(self, code): output = str( evaluate_python_code(code, static_tools=BASE_PYTHON_TOOLS, authorized_imports=self.authorized_imports) ) return output
class_definition
3,828
4,959
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/default_tools.py
null
393
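A direct-call sketch for the `python_interpreter` tool: the snippet runs in the restricted interpreter with only allow-listed imports, and the value of the last expression comes back as a string.

```py
from transformers.agents.default_tools import PythonInterpreterTool

py_tool = PythonInterpreterTool(authorized_imports=["numpy"])  # extends LIST_SAFE_MODULES
print(py_tool(code="import math\nmath.factorial(10)"))  # -> 3628800
```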
class FinalAnswerTool(Tool): name = "final_answer" description = "Provides a final answer to the given problem." inputs = {"answer": {"type": "any", "description": "The final answer to the problem"}} output_type = "any" def forward(self, answer): return answer
class_definition
4,962
5,251
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/default_tools.py
null
394
class DuckDuckGoSearchTool(Tool): name = "web_search" description = """Perform a web search based on your query (think a Google search) then returns the top search results as a list of dict elements. Each result has keys 'title', 'href' and 'body'.""" inputs = {"query": {"type": "string", "description": "The search query to perform."}} output_type = "any" def forward(self, query: str) -> str: try: from duckduckgo_search import DDGS except ImportError: raise ImportError( "You must install package `duckduckgo_search` to run this tool: for instance run `pip install duckduckgo-search`." ) results = DDGS().text(query, max_results=7) return results
class_definition
752
1,511
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/search.py
null
395
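A direct-call sketch; it needs `pip install duckduckgo-search`, and results come back as a list of dicts with `title`, `href` and `body` keys, as the description above states.

```py
from transformers.agents.search import DuckDuckGoSearchTool

search = DuckDuckGoSearchTool()
results = search("transformers agents ReAct framework")
for result in results[:3]:
    print(result["title"], "->", result["href"])
```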
class VisitWebpageTool(Tool): name = "visit_webpage" description = "Visits a webpage at the given url and returns its content as a markdown string." inputs = { "url": { "type": "string", "description": "The url of the webpage to visit.", } } output_type = "string" def forward(self, url: str) -> str: try: from markdownify import markdownify except ImportError: raise ImportError( "You must install package `markdownify` to run this tool: for instance run `pip install markdownify`." ) try: # Send a GET request to the URL response = requests.get(url) response.raise_for_status() # Raise an exception for bad status codes # Convert the HTML content to Markdown markdown_content = markdownify(response.text).strip() # Remove multiple line breaks markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content) return markdown_content except RequestException as e: return f"Error fetching the webpage: {str(e)}" except Exception as e: return f"An unexpected error occurred: {str(e)}"
class_definition
1,514
2,776
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/search.py
null
396
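A direct-call sketch; it needs `pip install markdownify`. Note that request failures are returned as error strings rather than raised, so callers should inspect the returned text. The URL below is just an example page.

```py
from transformers.agents.search import VisitWebpageTool

visit = VisitWebpageTool()
page = visit(url="https://huggingface.co/docs/transformers/agents")
print(page[:500])  # beginning of the markdown conversion of the page
```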
class InterpreterError(ValueError): """ An error raised when the interpreter cannot evaluate a Python expression, due to a syntax error or unsupported operations. """ pass
class_definition
933
1,123
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/python_interpreter.py
null
397
class BreakException(Exception): pass
class_definition
1,571
1,612
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/python_interpreter.py
null
398
class ContinueException(Exception): pass
class_definition
1,615
1,659
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/agents/python_interpreter.py
null
399