Dataset columns: text (string, length 1–1.02k), class_index (int64, range 0–10.8k), source (string, length 85–188)
    Examples:
    ``` python
    >>> import torch
    >>> from transformers import AutoProcessor, WhisperForConditionalGeneration, GenerationConfig
    >>> from datasets import load_dataset

    >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
    >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    >>> inputs = processor(ds[3]["audio"]["array"], return_tensors="pt")
    >>> input_features = inputs.input_features
10,689
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    >>> # Displaying timestamps
    >>> generated_ids = model.generate(inputs=input_features, return_timestamps=True)
    >>> transcription = processor.batch_decode(generated_ids, decode_with_timestamps=True)[0]
    >>> print("Transcription:", transcription)
    Transcription: <|startoftranscript|><|0.00|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all, and can<|6.44|><|6.44|> discover in it but little of rocky Ithaca.<|9.44|><|endoftext|>
10,689
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    >>> # No timestamps & change EOS:
    >>> # This allows the user to select a specific token to terminate the sequence on, in this case it's the word "can" (460)
    >>> model.generation_config.eos_token_id = 460
    >>> generated_ids = model.generate(inputs=input_features, return_timestamps=False)
    >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    >>> print("Transcription:", transcription)
    Transcription: He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can
    ```
    """

    def __init__(
        self,
        generate_config,
        begin_index: Optional[int] = None,
        _detect_timestamp_from_logprob: Optional[bool] = None,
    ):  # support for the kwargs
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.eos_token_id = generate_config.eos_token_id or generate_config.bos_token_id
10,689
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
# this variable is mostly just used for testing self._detect_timestamp_from_logprob = ( _detect_timestamp_from_logprob if _detect_timestamp_from_logprob is not None else getattr(generate_config, "_detect_timestamp_from_logprob", True) ) num_forced_ids = ( len(generate_config.forced_decoder_ids) if generate_config.forced_decoder_ids is not None else 0 ) self.begin_index = begin_index or (num_forced_ids + 1) self.max_initial_timestamp_index = getattr(generate_config, "max_initial_timestamp_index", None) # TODO(Patrick): Make sure that official models have max_initial_timestamp_index set to 50 # self.max_initial_timestamp_index = 50 def set_begin_index(self, begin_index): self.begin_index = begin_index
10,689
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores_processed = scores.clone()
        scores_processed[:, self.no_timestamps_token_id] = -float("inf")

        # timestamps have to appear in pairs, except directly before eos_token; mask logits accordingly
        for k in range(input_ids.shape[0]):
            sampled_tokens = input_ids[k, self.begin_index :]
            seq = list(sampled_tokens.tolist())

            last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.timestamp_begin
            penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.timestamp_begin
10,689
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
            if last_was_timestamp:
                if penultimate_was_timestamp:  # has to be non-timestamp
                    scores_processed[k, self.timestamp_begin :] = -float("inf")
                else:  # cannot be normal text tokens
                    scores_processed[k, : self.eos_token_id] = -float("inf")

            timestamps = sampled_tokens[sampled_tokens.ge(self.timestamp_begin)]
            if timestamps.numel() > 0:
                # `timestamps` shouldn't decrease; forbid timestamp tokens smaller than the last
                # The following lines of code are copied from: https://github.com/openai/whisper/pull/914/files#r1137085090
                if last_was_timestamp and not penultimate_was_timestamp:
                    timestamp_last = timestamps[-1]
                else:
                    # Avoid emitting <|0.00|> again
                    timestamp_last = timestamps[-1] + 1

                scores_processed[k, self.timestamp_begin : timestamp_last] = -float("inf")
10,689
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
        # apply the `max_initial_timestamp` option
        if input_ids.shape[1] == self.begin_index:
            scores_processed[:, : self.timestamp_begin] = -float("inf")

            if self.max_initial_timestamp_index is not None:
                last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
                scores_processed[:, last_allowed + 1 :] = -float("inf")

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = torch.nn.functional.log_softmax(scores_processed.float(), dim=-1)
        for k in range(input_ids.shape[0]):
            timestamp_logprob = logprobs[k, self.timestamp_begin :].logsumexp(dim=-1)
            max_text_token_logprob = logprobs[k, : self.timestamp_begin].max()
            if timestamp_logprob > max_text_token_logprob and self._detect_timestamp_from_logprob:
                scores_processed[k, : self.timestamp_begin] = -float("inf")

        return scores_processed
10,689
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
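The final block of `__call__` above implements the rule that a timestamp token is forced whenever the total probability mass on timestamp tokens exceeds that of the most likely text token. A minimal sketch of just that rule, with a made-up vocabulary split (`timestamp_begin = 4`) and toy scores rather than real Whisper outputs:

```python
import torch

timestamp_begin = 4  # hypothetical split: tokens >= 4 are "timestamps", tokens < 4 are text
scores = torch.tensor([[2.0, 1.0, 0.5, 0.0, 1.5, 1.4, 1.3]])

logprobs = torch.nn.functional.log_softmax(scores, dim=-1)
timestamp_logprob = logprobs[0, timestamp_begin:].logsumexp(dim=-1)
max_text_token_logprob = logprobs[0, :timestamp_begin].max()

if timestamp_logprob > max_text_token_logprob:
    # the combined timestamp mass wins, so every text token gets masked out
    scores[0, :timestamp_begin] = -float("inf")
print(scores)
```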
class WhisperNoSpeechDetection(LogitsProcessor): r"""This processor can be used to detect silence when using Whisper. It should take as input unprocessed logits to follow the original implementation""" def __init__(self, no_speech_token: int, begin_index: int, scores_is_logprobs: bool = False): self.no_speech_token = no_speech_token # offset between <start-of-transcription> token, <SOT>, in paper and first generated token # is equal to the position of the first generated token index self.start_of_trans_offset = begin_index # `self.begin_index` is a running value that is changed on the fly self.begin_index = begin_index self._no_speech_prob = [0.0] self.is_scores_logprobs = scores_is_logprobs # overwritten dynamically self.model = None self.inputs = None def set_model(self, model): self.model = model
10,690
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
def set_inputs(self, inputs): self.inputs = {**self.model.prepare_inputs_for_generation(**inputs), **inputs} self.inputs["input_features"] = self.inputs.pop("inputs") @property def no_speech_prob(self): return self._no_speech_prob def set_begin_index(self, begin_index): self.begin_index = begin_index @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: is_scores_logprobs = self.is_scores_logprobs if input_ids.shape[1] == self.begin_index: if self.start_of_trans_offset > 1: with torch.no_grad(): logits = self.model(**self.inputs).logits no_speech_index = self.begin_index - self.start_of_trans_offset no_speech_scores = logits[:, no_speech_index] is_scores_logprobs = False else: no_speech_scores = scores
10,690
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
if is_scores_logprobs: probs = no_speech_scores.exp() else: probs = no_speech_scores.float().softmax(dim=-1) self._no_speech_prob = probs[:, self.no_speech_token] return scores
10,690
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
class ClassifierFreeGuidanceLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] for classifier free guidance (CFG). The scores are split over the batch dimension,
    where the first half correspond to the conditional logits (predicted from the input prompt) and the second half
    correspond to the unconditional logits (predicted from an empty or 'null' prompt). The processor computes a
    weighted average across the conditional and unconditional logits, parameterised by the `guidance_scale`.

    See [the paper](https://arxiv.org/abs/2306.05284) for more information.

    <Tip warning={true}>

    This logits processor is exclusively compatible with
    [MusicGen](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen)

    </Tip>
10,691
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    Args:
        guidance_scale (float):
            The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`.
            Higher guidance scale encourages the model to generate samples that are more closely linked to the input
            prompt, usually at the expense of poorer quality.

    Examples:

    ```python
    >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration

    >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
    >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

    >>> inputs = processor(
    ...     text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
    ...     padding=True,
    ...     return_tensors="pt",
    ... )
    >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256)
    ```
    """
10,691
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
def __init__(self, guidance_scale): if guidance_scale > 1: self.guidance_scale = guidance_scale else: raise ValueError( "Require guidance scale >1 to use the classifier free guidance processor, got guidance scale " f"{guidance_scale}." )
10,691
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # simple check to make sure we have compatible batch sizes between our
        # logits scores (cond + uncond) and input ids (cond only)
        if scores.shape[0] != 2 * input_ids.shape[0]:
            raise ValueError(
                f"Logits should have twice the batch size of the input ids, the first half of batches corresponding to "
                f"the conditional inputs, and the second half of batches corresponding to the unconditional inputs. Got "
                f"batch size {scores.shape[0]} for the logits and {input_ids.shape[0]} for the input ids."
            )
        unguided_bsz = scores.shape[0] // 2
        cond_logits, uncond_logits = scores.split(unguided_bsz, dim=0)
        scores_processed = uncond_logits + (cond_logits - uncond_logits) * self.guidance_scale
        return scores_processed
10,691
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
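The guided score in `__call__` above is a linear interpolation, `uncond + scale * (cond - uncond)`. A small sketch of that arithmetic with random tensors, assuming the conditional half of the batch is stacked on top of the unconditional half as the processor expects:

```python
import torch

guidance_scale = 3.0
batch_size, vocab_size = 2, 8

# first half of the rows: conditional logits, second half: unconditional logits
scores = torch.randn(2 * batch_size, vocab_size)

cond_logits, uncond_logits = scores.split(batch_size, dim=0)
guided = uncond_logits + (cond_logits - uncond_logits) * guidance_scale
print(guided.shape)  # torch.Size([2, 8])
```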
class AlternatingCodebooksLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] enforcing alternated generation between the two codebooks of Bark.

    <Tip warning={true}>

    This logits processor is exclusively compatible with
    [Bark](https://huggingface.co/docs/transformers/en/model_doc/bark)'s fine submodel. See the model documentation
    for examples.

    </Tip>

    Args:
        input_start_len (`int`):
            The length of the initial input sequence.
        semantic_vocab_size (`int`):
            Vocabulary size of the semantic part, i.e. number of tokens associated to the semantic vocabulary.
        codebook_size (`int`):
            Number of tokens associated to the codebook.
    """

    def __init__(self, input_start_len: int, semantic_vocab_size: int, codebook_size: int):
        if not isinstance(input_start_len, int) or input_start_len < 0:
            raise ValueError(f"`input_start_len` has to be a non-negative integer, but is {input_start_len}")
10,692
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
        self.input_start_len = input_start_len
        self.semantic_vocab_size = semantic_vocab_size
        self.codebook_size = codebook_size

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        curr_len = input_ids.shape[-1]

        # even -> first codebook, odd -> second codebook
        is_first_codebook = ((curr_len - self.input_start_len) % 2) == 0

        scores_processed = scores.clone()
        if is_first_codebook:
            scores_processed[:, : self.semantic_vocab_size] = -float("inf")
            scores_processed[:, self.semantic_vocab_size + self.codebook_size :] = -float("inf")
        else:
            scores_processed[:, : self.semantic_vocab_size + self.codebook_size] = -float("inf")

        return scores_processed
10,692
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
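To make the alternation concrete, here is a toy sketch with invented sizes (`semantic_vocab_size=4`, `codebook_size=3`, `input_start_len=5`), showing which slice of the vocabulary stays unmasked at an even and an odd generation offset:

```python
import torch

semantic_vocab_size, codebook_size, input_start_len = 4, 3, 5
vocab_size = semantic_vocab_size + 2 * codebook_size

def alternating_mask(curr_len: int) -> torch.Tensor:
    scores = torch.zeros(1, vocab_size)
    if (curr_len - input_start_len) % 2 == 0:  # even offset -> first codebook
        scores[:, :semantic_vocab_size] = -float("inf")
        scores[:, semantic_vocab_size + codebook_size:] = -float("inf")
    else:  # odd offset -> second codebook
        scores[:, :semantic_vocab_size + codebook_size] = -float("inf")
    return scores

print(alternating_mask(5))  # only indices 4-6 stay finite (first codebook)
print(alternating_mask(6))  # only indices 7-9 stay finite (second codebook)
```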
class UnbatchedClassifierFreeGuidanceLogitsProcessor(LogitsProcessor):
    r"""
    Logits processor for Classifier-Free Guidance (CFG). The processor computes a weighted average across scores
    from prompt conditional and prompt unconditional (or negative) logits, parameterized by the `guidance_scale`.
    The unconditional scores are computed internally by prompting `model` with the `unconditional_ids` branch.

    See [the paper](https://arxiv.org/abs/2306.17806) for more information.
10,693
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
Args: guidance_scale (`float`): The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale != 1`. Higher guidance scale encourages the model to generate samples that are more closely linked to the input prompt, usually at the expense of poorer quality. A value smaller than 1 has the opposite effect, while making the negative prompt provided with negative_prompt_ids (if any) act as a positive prompt. model (`PreTrainedModel`): The model computing the unconditional scores. Supposedly the same as the one computing the conditional scores. Both models must use the same tokenizer. unconditional_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary for the unconditional branch. If unset, will default to the last token of the prompt.
10,693
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
unconditional_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Attention mask for unconditional_ids. use_cache (`bool`, *optional*, defaults to `True`): Whether to cache key/values during the negative prompt forward pass.
10,693
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    Examples:

    ```python
    >>> from transformers import AutoTokenizer, AutoModelForCausalLM

    >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
    >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
    >>> inputs = tokenizer(["Today, a dragon flew over Paris, France,"], return_tensors="pt")
    >>> out = model.generate(inputs["input_ids"], guidance_scale=1.5)
    >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
    'Today, a dragon flew over Paris, France, killing at least 50 people and injuring more than 100'

    >>> # with a negative prompt
    >>> neg_inputs = tokenizer(["A very happy event happened,"], return_tensors="pt")
    >>> out = model.generate(inputs["input_ids"], guidance_scale=2, negative_prompt_ids=neg_inputs["input_ids"])
    >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
    'Today, a dragon flew over Paris, France, killing at least 130 people. French media reported that'
10,693
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
>>> # with a positive prompt >>> neg_inputs = tokenizer(["A very happy event happened,"], return_tensors="pt") >>> out = model.generate(inputs["input_ids"], guidance_scale=0, negative_prompt_ids=neg_inputs["input_ids"]) >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0] "Today, a dragon flew over Paris, France, and I'm very happy to be here. I" ``` """ def __init__( self, guidance_scale: float, model, unconditional_ids: Optional[torch.LongTensor] = None, unconditional_attention_mask: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = True, ): self.guidance_scale = guidance_scale self.model = model self.unconditional_context = { "input_ids": unconditional_ids, "attention_mask": unconditional_attention_mask, "use_cache": use_cache, "past_key_values": None, "first_pass": True, }
10,693
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
def get_unconditional_logits(self, input_ids): if self.unconditional_context["first_pass"]: if self.unconditional_context["input_ids"] is None: self.unconditional_context["input_ids"] = input_ids[:, -1:] if self.unconditional_context["attention_mask"] is None: self.unconditional_context["attention_mask"] = torch.ones_like( self.unconditional_context["input_ids"], dtype=torch.long ) input_ids = self.unconditional_context["input_ids"] attention_mask = self.unconditional_context["attention_mask"] self.unconditional_context["first_pass"] = False else: attention_mask = torch.cat( [ self.unconditional_context["attention_mask"], torch.ones_like(input_ids[:, -1:], dtype=torch.long), ], dim=1, ) if not self.unconditional_context["use_cache"]:
10,693
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
input_ids = torch.cat([self.unconditional_context["input_ids"], input_ids[:, -1:]], dim=1) else: input_ids = input_ids[:, -1:] self.unconditional_context["input_ids"] = input_ids self.unconditional_context["attention_mask"] = attention_mask
10,693
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
        out = self.model(
            input_ids,
            attention_mask=attention_mask,
            use_cache=self.unconditional_context["use_cache"],
            past_key_values=self.unconditional_context["past_key_values"],
        )
        self.unconditional_context["past_key_values"] = out.get("past_key_values", None)

        return out.logits

    def __call__(self, input_ids, scores):
        scores = torch.nn.functional.log_softmax(scores, dim=-1)
        if self.guidance_scale == 1:
            return scores

        logits = self.get_unconditional_logits(input_ids)

        unconditional_logits = torch.nn.functional.log_softmax(logits[:, -1], dim=-1)
        scores_processed = self.guidance_scale * (scores - unconditional_logits) + unconditional_logits
        return scores_processed
10,693
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
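Unlike the batched MusicGen processor earlier, this `__call__` mixes conditional and unconditional scores after a log-softmax. A minimal sketch of only that arithmetic, with dummy tensors standing in for the model outputs (no actual forward pass):

```python
import torch

guidance_scale = 1.5
cond_scores = torch.randn(1, 10)         # scores passed into __call__
uncond_last_logits = torch.randn(1, 10)  # stands in for model(...).logits[:, -1]

cond = torch.nn.functional.log_softmax(cond_scores, dim=-1)
uncond = torch.nn.functional.log_softmax(uncond_last_logits, dim=-1)
scores_processed = guidance_scale * (cond - uncond) + uncond
print(scores_processed.shape)  # torch.Size([1, 10])
```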
class BarkEosPrioritizerLogitsProcessor(LogitsProcessor):
    r"""This processor ensures that the EOS token is selected if its probability is greater than the `min_eos_p`.

    <Tip warning={true}>

    This logits processor is exclusively compatible with
    [Bark](https://huggingface.co/docs/transformers/en/model_doc/bark). See the model documentation for examples.

    </Tip>

    Args:
        eos_token_id (`Union[int, List[int], torch.Tensor]`):
            The id(s) of the *end-of-sequence* token.
        min_eos_p (`float`, *optional*):
            Minimum end of speech threshold.
    """

    def __init__(self, eos_token_id: Union[int, List[int], torch.Tensor], min_eos_p: float, device: str = "cpu"):
        if not isinstance(eos_token_id, torch.Tensor):
            if isinstance(eos_token_id, int):
                eos_token_id = [eos_token_id]
            eos_token_id = torch.tensor(eos_token_id, device=device)
        self.eos_token_id = eos_token_id
10,694
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
        if torch.is_floating_point(eos_token_id) or (eos_token_id < 0).any():
            raise ValueError(f"`eos_token_id` has to be a list of positive integers, but is {eos_token_id}")
        if min_eos_p is not None and min_eos_p <= 0:
            raise ValueError(f"`min_eos_p` has to be a positive float, but is {min_eos_p}")
        self.min_eos_p = min_eos_p

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        scores_processed = scores
        if self.min_eos_p:
            probs = torch.nn.functional.softmax(scores.float(), dim=-1)
            # create scores full of -inf except for the eos_token_id
            early_stop_scores = torch.ones_like(scores) * -float("inf")
            early_stop_scores[:, self.eos_token_id] = scores[:, self.eos_token_id]
10,694
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
            do_early_stop = probs[:, self.eos_token_id] > self.min_eos_p
            do_early_stop = torch.any(do_early_stop, dim=1, keepdim=True)
            scores_processed = torch.where(do_early_stop, early_stop_scores, scores)

        return scores_processed
10,694
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
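The early-stop logic above swaps in an "EOS-only" row whenever the EOS probability clears `min_eos_p`. A toy sketch with hand-picked numbers (a single EOS id is assumed):

```python
import torch

eos_token_id = torch.tensor([3])
min_eos_p = 0.3
scores = torch.tensor([[0.0, 0.0, 0.0, 2.0],   # EOS clearly likely -> row is replaced
                       [2.0, 0.0, 0.0, 0.0]])  # EOS unlikely -> row is kept

probs = torch.nn.functional.softmax(scores, dim=-1)
early_stop_scores = torch.full_like(scores, -float("inf"))
early_stop_scores[:, eos_token_id] = scores[:, eos_token_id]

do_early_stop = (probs[:, eos_token_id] > min_eos_p).any(dim=1, keepdim=True)
print(torch.where(do_early_stop, early_stop_scores, scores))
```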
class WatermarkLogitsProcessor(LogitsProcessor):
    r"""
    Logits processor for watermarking generated text. The processor modifies model output scores by adding a small
    bias to a randomized set of "green" tokens before generating the next token. The "green" token selection process
    depends on the `seeding_scheme` used. The code was based on the
    [original repo](https://github.com/jwkirchenbauer/lm-watermarking/tree/main).

    The text generated by this `LogitsProcessor` can be detected using `WatermarkDetector`. See
    [`~WatermarkDetector.__call__`] for details.

    See [the paper](https://arxiv.org/abs/2306.04634) for more information.
10,695
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    Args:
        vocab_size (`int`):
            The model tokenizer's vocab_size. Used to calculate "green" tokens ratio.
        device (`str`):
            The device where model is allocated.
        greenlist_ratio (`float`, *optional*, defaults to 0.25):
            The ratio of "green" tokens used to the vocabulary size. Defaults to 0.25.
        bias (`float`, *optional*, defaults to 2.0):
            The bias added to the selected "green" tokens' logits. Consider lowering the `bias` if the text generation
            quality degrades. Recommended values are in the range of [0.5, 2.0]. Defaults to 2.0.
        hashing_key (`int`, *optional*, defaults to 15485863):
            Key used for hashing. If you deploy this watermark, we advise using another private key. Defaults to
            15485863 (the millionth prime).
        seeding_scheme (`str`, *optional*, defaults to `"lefthash"`):
10,695
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
            The seeding scheme used for selecting "green" tokens. Accepts values:
                - "lefthash" (default): "green" token selection depends on the last token (Algorithm 2 from the paper)
                - "selfhash": "green" token selection depends on the current token itself (Algorithm 3 from the paper)
                  The downside of this scheme is that it considers all possible next tokens and can be slower than
                  "lefthash".
        context_width (`int`, *optional*, defaults to 1):
            The number of previous tokens to use when setting the seed, i.e. the context length used in seeding. A
            higher context width makes the watermarking more robust.
10,695
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    Examples:

    ```python
    >>> from transformers import AutoTokenizer, AutoModelForCausalLM, WatermarkingConfig

    >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
    >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
    >>> inputs = tokenizer(["Alice and Bob are"], return_tensors="pt")

    >>> # normal generation
    >>> out = model.generate(inputs["input_ids"], max_length=20, do_sample=False)
    >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
    'Alice and Bob are both in the same room.\n\n"I\'m not sure if you\'re'

    >>> # watermarked generation
    >>> watermarking_config = WatermarkingConfig(bias=2.5, context_width=2, seeding_scheme="selfhash")
    >>> out = model.generate(inputs["input_ids"], watermarking_config=watermarking_config, max_length=20, do_sample=False)
    >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
    'Alice and Bob are both still alive and well and the story is pretty much a one-hour adventure'
10,695
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    >>> # to detect watermarked text use the WatermarkDetector class
    >>> from transformers import WatermarkDetector

    >>> detector = WatermarkDetector(model_config=model.config, device="cpu", watermarking_config=watermarking_config)
    >>> detection_preds = detector(out)
    >>> detection_preds
    array([ True])
    ```
    """

    def __init__(
        self,
        vocab_size,
        device,
        greenlist_ratio: float = 0.25,
        bias: float = 2.0,
        hashing_key: int = 15485863,
        seeding_scheme: str = "lefthash",
        context_width: int = 1,
    ):
        if seeding_scheme not in ["selfhash", "lefthash"]:
            raise ValueError(f"seeding_scheme has to be one of [`selfhash`, `lefthash`], but found {seeding_scheme}")
        if greenlist_ratio >= 1.0 or greenlist_ratio <= 0.0:
            raise ValueError(
                f"greenlist_ratio has to be in the range (0.0, 1.0), exclusive, but found {greenlist_ratio}"
            )
10,695
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
self.vocab_size = vocab_size self.greenlist_size = int(self.vocab_size * greenlist_ratio) self.bias = bias self.seeding_scheme = seeding_scheme self.rng = torch.Generator(device=device) self.hash_key = hashing_key self.context_width = context_width self.rng.manual_seed(hashing_key) self.table_size = 1_000_003 self.fixed_table = torch.randperm(self.table_size, generator=self.rng, device=device) def set_seed(self, input_seq: torch.LongTensor): input_seq = input_seq[-self.context_width :] if self.seeding_scheme == "selfhash": a = self.fixed_table[input_seq % self.table_size] + 1 b = self.fixed_table[input_seq[-1] % self.table_size] + 1 seed = (self.hash_key * a * b).min().item() else: seed = self.hash_key * input_seq[-1].item() self.rng.manual_seed(seed % (2**64 - 1))
10,695
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    def _get_greenlist_ids(self, input_seq: torch.LongTensor) -> torch.LongTensor:
        self.set_seed(input_seq)
        vocab_permutation = torch.randperm(self.vocab_size, device=input_seq.device, generator=self.rng)
        greenlist_ids = vocab_permutation[: self.greenlist_size]
        return greenlist_ids

    def _score_rejection_sampling(self, input_seq: torch.LongTensor, scores: torch.FloatTensor) -> torch.LongTensor:
        """
        Generate greenlist based on current candidate next token. Reject and move on if necessary. Runs for a fixed
        number of steps only for efficiency, since the method is not batched.
        """
        final_greenlist = []
        _, greedy_predictions = scores.sort(dim=-1, descending=True)
10,695
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
# 40 is an arbitrary number chosen to save compute and not run for long (taken from orig repo) for i in range(40): greenlist_ids = self._get_greenlist_ids(torch.cat([input_seq, greedy_predictions[i, None]], dim=-1)) if greedy_predictions[i] in greenlist_ids: final_greenlist.append(greedy_predictions[i]) return torch.tensor(final_greenlist, device=input_seq.device) @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: if input_ids.shape[-1] < self.context_width: logger.warning( f"`input_ids` should have at least `{self.context_width}` tokens but has {input_ids.shape[-1]}. " "The seeding will be skipped for this generation step!" ) return scores
10,695
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
        scores_processed = scores.clone()
        for b_idx, input_seq in enumerate(input_ids):
            if self.seeding_scheme == "selfhash":
                greenlist_ids = self._score_rejection_sampling(input_seq, scores[b_idx])
            else:
                greenlist_ids = self._get_greenlist_ids(input_seq)
            scores_processed[b_idx, greenlist_ids] = scores_processed[b_idx, greenlist_ids] + self.bias

        return scores_processed
10,695
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
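Stripped of the seeded permutation, the watermark amounts to adding `bias` to a per-step subset of logits. A toy sketch with a hand-picked greenlist (in the real processor the ids come from `_get_greenlist_ids`, not from a constant):

```python
import torch

bias = 2.0
scores = torch.zeros(1, 6)
greenlist_ids = torch.tensor([1, 4])  # hypothetical "green" tokens for this step

scores_processed = scores.clone()
scores_processed[0, greenlist_ids] += bias
print(scores_processed)  # tensor([[0., 2., 0., 0., 2., 0.]])
```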
class SynthIDTextWatermarkState: """SynthID watermarking state.""" def __init__( self, batch_size: int, ngram_len: int, context_history_size: int, device: torch.device, ): """Initializes the state. Args: batch_size (`int`): Batch size. ngram_len (`int`): Ngram length. context_history_size (`int`): Size of the tensor to keep track of seen contexts. device (`int`): Device to use. """ self.context = torch.zeros( (batch_size, ngram_len - 1), dtype=torch.int64, device=device, ) self.context_history = torch.zeros( (batch_size, context_history_size), dtype=torch.int64, device=device, ) self.num_calls = 0
10,696
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
class SynthIDTextWatermarkLogitsProcessor(LogitsProcessor):
    r"""
    Logits processor that implements watermarking techniques for text generation models.
    This class facilitates the application of SynthID text watermarking, a method for embedding imperceptible signals
    into generated text to aid in detecting synthetic content. It operates by subtly manipulating the probabilities of
    token selection during text generation in a manner that can be reliably recovered later for verification.

    Key Features:
    * **State Management:** Maintains internal state to track token sequences and generate watermarking keys
      dynamically.
    * **Key Generation:** Computes hashes based on token sequences and watermarking parameters to create unique keys
      for each position.
    * **G-Value Sampling:** Employs a pre-computed sampling table to sample watermarking values (g-values) based on
      the generated keys.
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    * **Score Adjustment:** Applies calculated g-values to modify token probabilities during generation, embedding the
      watermark.
    * **Context Repetition Handling:** Incorporates logic to avoid watermarking tokens in repeated contexts,
      preserving naturalness.
    * **EOS Token Masking:** Supports masking end-of-sentence tokens to prevent their inclusion in watermarking
      calculations.
    * **Utility Functions:** Provides functions to compute g-values directly, check for context repetition, create
      EOS token masks, and estimate expected mean g-values.

    Refer to the paper (https://www.nature.com/articles/s41586-024-08025-4) for more details.
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    Args:
        ngram_len (`int`):
            Ngram length.
        keys (`List[int]`):
            A sequence of watermarking keys, one for each depth.
        sampling_table_size (`int`):
            Size of the sampling table.
        sampling_table_seed (`int`):
            Random seed to generate the sampling table.
        context_history_size (`int`):
            Size of the tensor to keep track of seen contexts.
        device (`torch.device`):
            Device to use.
        skip_first_ngram_calls (`bool`, *optional*, defaults to `False`):
            Whether to skip the first ngram calls.
        debug_mode (`bool`, *optional*, defaults to `False`):
            If set, logits are replaced with a uniform distribution before the watermarking modification is applied.
            This is intended for testing the implementation.

    Examples:
    ```python
    >>> from transformers import AutoModelForCausalLM, AutoTokenizer, SynthIDTextWatermarkingConfig
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    >>> tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-2b', padding_side="left")
    >>> model = AutoModelForCausalLM.from_pretrained('google/gemma-2-2b')

    >>> # SynthID Text configuration
    >>> watermarking_config = SynthIDTextWatermarkingConfig(
    ...     keys=[654, 400, 836, 123, 340, 443, 597, 160, 57],
    ...     ngram_len=5,
    ... )

    >>> # Generation with watermarking
    >>> tokenized_prompts = tokenizer(["Once upon a time, "], return_tensors="pt", padding=True)
    >>> output_sequences = model.generate(
    ...     **tokenized_prompts, watermarking_config=watermarking_config, do_sample=True, max_new_tokens=10
    ... )
    >>> watermarked_text = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
    ```
    """
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
def __init__( self, ngram_len: int, keys: List[int], sampling_table_size: int, sampling_table_seed: int, context_history_size: int, device: torch.device, skip_first_ngram_calls: bool = False, debug_mode: bool = False, ): self.ngram_len = ngram_len self.keys = torch.tensor(keys, device=device)
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
generator = torch.Generator(device=device).manual_seed(sampling_table_seed) # A random sampling table is pre-computed and modulo table size is applied to map from a hash of ngram keys to # g values, this is similar to the hashtable implementation used in # https://github.com/facebookresearch/three_bricks. We note that the hashing employed in this repository is # different from that used to watermark the Gemini App, and hence the detectors trained based on the # hashing in this repository will not transfer to text generated by the Gemini App. self.sampling_table = torch.randint( low=0, high=2, size=(sampling_table_size,), generator=generator, device=device, ) self.context_history_size = context_history_size self.device = device self.state = None self.skip_first_ngram_calls = skip_first_ngram_calls self.debug_mode = debug_mode
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    def _init_state(self, batch_size: int):
        """Initializes the state."""
        self.state = SynthIDTextWatermarkState(
            batch_size=batch_size,
            ngram_len=self.ngram_len,
            context_history_size=self.context_history_size,
            device=self.device,
        )

    def update_scores(self, scores: torch.FloatTensor, g_values: torch.FloatTensor) -> torch.FloatTensor:
        """Updates scores using the g values.

        We assume that the scores are in the log space.

        Args:
            scores (`torch.FloatTensor`): Scores (batch_size, vocab_size).
            g_values (`torch.FloatTensor`): G values (batch_size, vocab_size, depth).

        Returns:
            Updated scores (batch_size, vocab_size).
        """
        _, _, depth = g_values.shape
        probs = torch.softmax(scores, dim=1)
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
        for i in range(depth):
            g_values_at_depth = g_values[:, :, i]
            g_mass_at_depth = (g_values_at_depth * probs).sum(axis=1, keepdims=True)
            probs = probs * (1 + g_values_at_depth - g_mass_at_depth)

        log_probs = torch.log(probs)
        log_probs = torch.where(torch.isfinite(log_probs), log_probs, torch.finfo(log_probs.dtype).min)
        return log_probs

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        self._check_input_ids_shape(input_ids)
        batch_size, vocab_size = scores.shape

        if self.debug_mode:
            scores = torch.ones_like(scores)

        # Currently, indices is just an arange so that watermarking is computed on the dense logits.
        all_indices = torch.stack([torch.arange(vocab_size, device=self.device) for _ in range(batch_size)])
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
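The depth loop above applies the reweighting `p = p * (1 + g - E[g])` once per watermarking layer, which shifts probability mass toward tokens with g = 1 while keeping the distribution normalized. A tiny numeric sketch with one depth and a hand-written g-value vector:

```python
import torch

probs = torch.tensor([[0.5, 0.3, 0.2]])
g_values = torch.tensor([[[1.0], [0.0], [1.0]]])  # (batch=1, vocab=3, depth=1)

for i in range(g_values.shape[-1]):
    g = g_values[:, :, i]
    g_mass = (g * probs).sum(dim=1, keepdim=True)  # E[g] = 0.7
    probs = probs * (1 + g - g_mass)

print(probs, probs.sum())  # tensor([[0.65, 0.09, 0.26]]), total mass still 1.0
```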
if self.state is None: # Initialize watermarking state if it does not exist. self._init_state(batch_size) else: # Append last input id (which is the input id added in last call) to the # previous context so we have the context to be used for current # watermarking. self.state.context = torch.concat( (self.state.context, input_ids[:, -1:]), dim=1, ) self.state.context = self.state.context[:, 1:] if self.state is None: raise ValueError("self.state can't be None! Call `self._init_state` to initialize the state.") self.state.num_calls += 1 # Don't watermark the first ngram_len - 1 tokens if set. if self.skip_first_ngram_calls and self.state.num_calls < self.ngram_len: return scores
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
# 2. Generate random keys for each ngram key combination. ngram_keys, hash_result_with_just_context = self._compute_keys(self.state.context, all_indices) # ngram_keys shape [batch_size, top_k, depth] # 3. Sample g values. g_values = self.sample_g_values(ngram_keys) # g_values shape [batch_size, top_k, depth] # 4. Modify scores. updated_scores = self.update_scores(scores, g_values) # updated scores shape [batch_size, top_k] # 5. Check if the current watermarking context was previously used, if yes skip watermarking. hash_result_with_just_context = hash_result_with_just_context[:, None] is_repeated_context = (self.state.context_history == hash_result_with_just_context).any( dim=1, keepdim=True, ) self.state.context_history = torch.concat( (hash_result_with_just_context, self.state.context_history), dim=1, )[:, :-1]
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
updated_watermarked_scores = torch.where( is_repeated_context, input=scores, other=updated_scores, ) return updated_watermarked_scores def accumulate_hash( self, current_hash: torch.LongTensor, data: torch.LongTensor, multiplier: int = 6364136223846793005, increment: int = 1, ) -> torch.LongTensor: """ Accumulate hash of data on current hash. Method uses adapted linear congruential generator with newlib/musl parameters. This function has following property - f(x, data[T]) = f(f(x, data[:T - 1]), data[T]) This function expects current_hash.shape and data.shape[:-1] to match/broadcastable.
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
        Args:
            current_hash (`torch.LongTensor`): (shape,)
            data (`torch.LongTensor`): (shape, tensor_len)
            multiplier (`int`, *optional*, defaults to 6364136223846793005):
                multiplier of linear congruential generator
            increment (`int`, *optional*, defaults to 1):
                increment of linear congruential generator

        Returns:
            updated hash (shape,)
        """
        for i in range(data.shape[-1]):
            current_hash = torch.add(current_hash, data[..., i])
            current_hash = torch.mul(current_hash, multiplier)
            current_hash = torch.add(current_hash, increment)
        return current_hash

    def compute_ngram_keys(self, ngrams: torch.LongTensor) -> torch.LongTensor:
        """Computes random keys for each ngram and depth.

        Args:
            ngrams (`torch.LongTensor`):
                Ngrams (batch_size, num_ngrams, ngram_len).
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
Returns: ngram keys (batch_size, num_ngrams, depth). """ if len(ngrams.shape) != 3: raise ValueError( "Ngrams should be of shape (batch_size, num_ngrams, ngram_len), but" f" is {ngrams.shape}" ) if ngrams.shape[2] != self.ngram_len: raise ValueError( "Ngrams should be of shape (batch_size, num_ngrams, ngram_len)," f" where ngram_len is {self.ngram_len}, but is {ngrams.shape}" ) batch_size, _, _ = ngrams.shape hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long) # hash_result shape [batch_size,] # ngrams shape [batch_size, num_ngrams, ngram_len] hash_result = torch.vmap(self.accumulate_hash, in_dims=(None, 1), out_dims=1)(hash_result, ngrams) # hash_result shape [batch_size, num_ngrams]
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
keys = self.keys[None, None, :, None] # hash_result shape [batch_size, num_ngrams] # keys shape [1, 1, depth, 1] hash_result = torch.vmap(self.accumulate_hash, in_dims=(None, 2), out_dims=2)(hash_result, keys) # hash_result shape [batch_size, num_ngrams, depth] return hash_result def _compute_keys( self, n_minus_1_grams: torch.LongTensor, indices: torch.LongTensor ) -> Tuple[torch.LongTensor, torch.LongTensor]: """Computes random keys for each ngram and depth. Args: n_minus_1_grams (`torch.LongTensor`): Ngrams (batch_size, ngram_len - 1). indices (`torch.LongTensor`): indices of the continuations (batch_size, num_indices) Returns: Ngram keys (batch_size, num_indices, depth). """ batch_size, _ = n_minus_1_grams.shape
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long) # First hash n_minus_1 gram, for each batch entry we have a single # n_minus_1 gram context. # hash_result shape [batch_size] # n_minus_1_gram shape [batch_size, ngram_len - 1] hash_result_with_just_context = self.accumulate_hash(hash_result, n_minus_1_grams) # hash_result shape [batch_size,] # Indices is of shape [batch_size, num_indices], so we make it # [batch_size, num_indices, 1] so we can vmap over num_indices dim. hash_result = torch.vmap(self.accumulate_hash, in_dims=(None, 1), out_dims=1)( hash_result_with_just_context, indices[:, :, None] ) # hash_result shape [batch_size, num_indices] # Basically we have a hash for each batch entry and each indices # Now we add watermarking keys to this hash. # keys are of shape [depth,] # We add batch, num_indices and data dimension to this making it
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
# [1, 1, depth, 1]. # So we can vmap over the depth dimension for compute_hash keys = self.keys[None, None, :, None] hash_result = torch.vmap(self.accumulate_hash, in_dims=(None, 2), out_dims=2)(hash_result, keys) # hash_result shape should be [batch_size, num_indices, depth] return hash_result, hash_result_with_just_context
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
    def sample_g_values(self, ngram_keys: torch.LongTensor) -> torch.LongTensor:
        """
        Samples g values from a Bernoulli distribution.

        It is not possible to pass random keys in a vectorized way in torch. Instead we pre-compute a random sampling
        table, and apply modulo table size to map from ngram keys (int64) to g values.

        Args:
            ngram_keys (`torch.LongTensor`):
                Random keys (batch_size, num_ngrams, depth).

        Returns:
            G values (batch_size, num_ngrams, depth).
        """
        (sampling_table_size,) = self.sampling_table.shape
        sampling_table = self.sampling_table.reshape((1, 1, sampling_table_size))
        ngram_keys = ngram_keys % sampling_table_size
        return torch.take_along_dim(sampling_table, indices=ngram_keys, dim=2)
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
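A g-value is just a lookup into the fixed random 0/1 table, indexed by the key modulo the table size. A compact sketch of that mapping with a deliberately tiny table (the real table has `sampling_table_size` entries):

```python
import torch

sampling_table = torch.tensor([0, 1, 1, 0, 1])  # stand-in for the pre-computed table
ngram_keys = torch.tensor([[[3, 7, 12]]])       # (batch=1, num_ngrams=1, depth=3)

indices = ngram_keys % sampling_table.shape[0]  # -> [[[3, 2, 2]]]
g_values = torch.take_along_dim(sampling_table.reshape(1, 1, -1), indices, dim=2)
print(g_values)  # tensor([[[0, 1, 1]]])
```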
def _check_input_ids_shape(self, input_ids: torch.LongTensor): """Checks the shape of input ids.""" if len(input_ids.shape) != 2: raise ValueError("Input ids should be of shape (batch_size, input_len), but is" f" {input_ids.shape}") def compute_g_values(self, input_ids: torch.LongTensor) -> torch.LongTensor: """ Computes g values for each ngram from the given sequence of tokens. Args: input_ids (`torch.LongTensor`): Input token ids (batch_size, input_len). Returns: G values (batch_size, input_len - (ngram_len - 1), depth). """ self._check_input_ids_shape(input_ids) ngrams = input_ids.unfold(dimension=1, size=self.ngram_len, step=1) ngram_keys = self.compute_ngram_keys(ngrams) return self.sample_g_values(ngram_keys) def compute_context_repetition_mask(self, input_ids: torch.LongTensor) -> torch.LongTensor: """ Computes repetition mask.
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
0 and 1 stand for repeated and not repeated context n-1 grams respectively. Args: input_ids (`torch.LongTensor`): Input token ids (batch_size, input_len). Returns: Repetitions mask (batch_size, input_len - (ngram_len - 1)). """ self._check_input_ids_shape(input_ids) batch_size, _ = input_ids.shape state = SynthIDTextWatermarkState( batch_size=batch_size, ngram_len=self.ngram_len, context_history_size=self.context_history_size, device=self.device, ) contexts = input_ids[:, :-1].unfold( dimension=1, size=self.ngram_len - 1, step=1, ) _, num_contexts, _ = contexts.shape
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
are_repeated_contexts = [] for i in range(num_contexts): context = contexts[:, i, :] hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long) context_hash = self.accumulate_hash(hash_result, context)[:, None] is_repeated_context = (state.context_history == context_hash).any( dim=1, keepdim=True, ) are_repeated_contexts.append(is_repeated_context) state.context_history = torch.concat( (context_hash, state.context_history), dim=1, )[:, :-1] are_repeated_contexts = torch.concat(are_repeated_contexts, dim=1) return torch.logical_not(are_repeated_contexts) def compute_eos_token_mask(self, input_ids: torch.LongTensor, eos_token_id: int) -> torch.LongTensor: """ Computes repetitions mask. 1 stands for ngrams that don't contain EOS tokens and vice versa.
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
Args: input_ids (`torch.LongTensor`): Input token ids (batch_size, input_len). eos_token_id (`int`): EOS token ID. Returns: EOS token mask (batch_size, input_len). """ self._check_input_ids_shape(input_ids) noneos_masks = [] all_eos_equated = input_ids == eos_token_id for eos_equated in all_eos_equated: nonzero_idx = torch.nonzero(eos_equated) noneos_mask = torch.ones_like(eos_equated) if nonzero_idx.shape[0] != 0: noneos_mask[nonzero_idx[0][0] :] = 0 noneos_masks.append(noneos_mask) return torch.stack(noneos_masks, dim=0) def expected_mean_g_value(self, vocab_size: int, coinflip_prob: float = 0.5) -> float: """ Compute expected mean g-value after watermarking, assuming uniform LM dist. This is the theoretical expected value for single-layer watermarking.
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
        Args:
            vocab_size (`int`):
                The size of the vocabulary.
            coinflip_prob (`float`, *optional*, defaults to 0.5):
                Probability of 1 in boolean prf.

        Returns:
            The expected mean g-value for watermarked text.
        """
        return coinflip_prob + coinflip_prob * (1 - coinflip_prob) * (1 - (1 / vocab_size))
10,697
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/logits_process.py
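As a purely arithmetic illustration of the closed form above: with the default `coinflip_prob=0.5`, the expected mean g-value approaches 0.75 as the vocabulary grows (the function below simply restates the return expression):

```python
def expected_mean_g_value(vocab_size: int, coinflip_prob: float = 0.5) -> float:
    return coinflip_prob + coinflip_prob * (1 - coinflip_prob) * (1 - (1 / vocab_size))

for vocab_size in (2, 100, 256_000):
    print(vocab_size, expected_mean_g_value(vocab_size))
# 2 -> 0.625, 100 -> 0.7475, 256000 -> ~0.75
```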
class TFLogitsProcessor: """Abstract base class for all logit processors that can be applied during generation.""" @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: """TF method for processing logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
10,698
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
class TFLogitsWarper: """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: """TF method for warping logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
10,699
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
class TFLogitsProcessorList(list): """ This class can be used to create a list of [`TFLogitsProcessor`] to subsequently process a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each [`TFLogitsProcessor`] to the inputs. """
10,700
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
@add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int, **kwargs) -> tf.Tensor: for processor in self: function_args = inspect.signature(processor.__call__).parameters if len(function_args) > 3: if not all(arg in kwargs for arg in list(function_args.keys())[2:]): raise ValueError( f"Make sure that all the required parameters: {list(function_args.keys())} for " f"{processor.__class__} are passed to the logits processor." ) scores = processor(input_ids, scores, cur_len, **kwargs) else: scores = processor(input_ids, scores, cur_len) return scores
10,700
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
class TFTemperatureLogitsWarper(TFLogitsWarper): r""" [`TFLogitsWarper`] for temperature (exponential scaling output probability distribution). Args: temperature (`float`): The value used to module the logits distribution. """ def __init__(self, temperature: float): if not isinstance(temperature, float) or not (temperature > 0): raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}") self.temperature = temperature def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: scores = scores / self.temperature return scores
10,701
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
class TFTopKLogitsWarper(TFLogitsWarper): r""" [`TFLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. Args: top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. filter_value (`float`, *optional*, defaults to -inf): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. """ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_k, int) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") self.top_k = max(top_k, min_tokens_to_keep) self.filter_value = filter_value
10,702
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
        top_k = min(self.top_k, scores.shape[-1])  # Safety check
        # Boolean mask containing all tokens with a probability less than the last token of the top-k
        indices_to_remove = scores < tf.math.top_k(scores, k=top_k)[0][..., -1:]
        next_scores = tf.where(indices_to_remove, self.filter_value, scores)
        return next_scores
10,702
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
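The masking above keeps only scores that are at least as large as the k-th best value in each row. A minimal sketch with a concrete tensor (TensorFlow assumed to be installed, values chosen by hand):

```python
import tensorflow as tf

top_k, filter_value = 2, -float("inf")
scores = tf.constant([[0.1, 3.0, 1.2, 2.5]])

kth_best = tf.math.top_k(scores, k=top_k)[0][..., -1:]  # 2.5, the 2nd-highest score
indices_to_remove = scores < kth_best
print(tf.where(indices_to_remove, filter_value, scores))  # [[-inf, 3.0, -inf, 2.5]]
```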
class TFTopPLogitsWarper(TFLogitsWarper): """ [`TFLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off. Args: top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. filter_value (`float`, *optional*, defaults to -inf): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. """
10,703
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0): raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") self.top_p = top_p self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: topk_scores, topk_indices = tf.math.top_k(scores, scores.shape[-1]) mask_scores = tf.fill(scores.shape, self.filter_value) cumulative_probs = tf.math.cumsum(stable_softmax(topk_scores, axis=-1), axis=-1) score_mask = cumulative_probs < self.top_p
10,703
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
# Also include the token that is higher than top_p (the first false = shift and insert a True on the left) score_mask = tf.concat((tf.ones([score_mask.shape[0], 1], dtype=tf.bool), score_mask[:, :-1]), axis=-1) # Ensure min tokens to keep score_mask = tf.concat( ( tf.ones([score_mask.shape[0], self.min_tokens_to_keep], dtype=tf.bool), score_mask[:, self.min_tokens_to_keep :], ), axis=-1, ) # Mask the values that do not fit the criteria topk_next_scores = tf.where(score_mask, topk_scores, mask_scores)
10,703
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
# Undo the topk sorting: converts the 2D matrix of per-row original indices of shape (batch_size, vocab_size) # to a 3D tensor of shape (batch_size, vocab_size, 2) containing the original score coordinate, from which we # can scatter (i.e. `scatter_indices[row, col, :]` is a tensor containing `[row, topk_indices[row, col]]`) scatter_rows = tf.tile(tf.expand_dims(tf.range(topk_indices.shape[0]), axis=-1), [1, topk_indices.shape[-1]]) scatter_indices = tf.stack((scatter_rows, topk_indices), axis=-1) next_scores = tf.scatter_nd(scatter_indices, topk_next_scores, shape=topk_next_scores.shape) return next_scores
10,703
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
class TFMinLengthLogitsProcessor(TFLogitsProcessor): r""" [`TFLogitsProcessor`] enforcing a min-length by setting EOS probability to 0. Args: min_length (`int`): The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`. eos_token_id (`int`): The id of the *end-of-sequence* token. """ def __init__(self, min_length: int, eos_token_id: int): if not isinstance(min_length, int) or min_length < 0: raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}") if not isinstance(eos_token_id, int) or eos_token_id < 0: raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}") self.min_length = min_length self.eos_token_id = eos_token_id
10,704
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
def _apply_eos_token_mask(self, scores: tf.Tensor) -> tf.Tensor: eos_token_id_mask = tf.range(scores.shape[-1]) == self.eos_token_id scores = tf.where(eos_token_id_mask, float("-inf"), scores) return scores def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: # applies eos token masking if the first argument is true scores = tf.cond( tf.less(cur_len, self.min_length), lambda: self._apply_eos_token_mask(scores), lambda: tf.identity(scores), ) return scores
10,704
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
class TFRepetitionPenaltyLogitsProcessor(TFLogitsProcessor):
    r"""
    [`TFLogitsProcessor`] enforcing an exponential penalty on repeated sequences.

    Args:
        penalty (`float`):
            The parameter for repetition penalty. 1.0 means no penalty. See
            [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
    """

    def __init__(self, penalty: float):
        if not isinstance(penalty, float) or not (penalty > 0):
            raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")

        self.penalty = penalty

    def _create_score_penalties(self, input_ids: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
        # We want to populate the penalties in the positions of `input_ids`. Since XLA can't handle shapes unknown
        # before runtime, `tf.unique` can't be used. Therefore, we may have redundant updates, when a given row has
        # the same token multiple times.
10,705
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
# Gathers the penalties to apply logit_penalties = tf.gather(logits, input_ids, axis=1, batch_dims=1) logit_penalties = tf.where(logit_penalties > 0, 1 / self.penalty, logit_penalties) logit_penalties = tf.where(logit_penalties < 0, self.penalty, logit_penalties) # Scatters the penalties token_penalties = tf.ones(logits.shape) batch_size = input_ids.shape[0] seq_len = tf.shape(input_ids)[1] # the sequence length has dynamic size, hence the dynamic shape indexable_prev_input_ids = tf.concat( ( tf.expand_dims(tf.repeat(tf.range(batch_size), seq_len), axis=-1), tf.expand_dims(tf.reshape(input_ids, [-1]), axis=-1), ), axis=1, ) token_penalties = tf.tensor_scatter_nd_update( token_penalties, indices=indexable_prev_input_ids, updates=tf.reshape(logit_penalties, [-1]) ) return token_penalties
10,705
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: score_penalties = self._create_score_penalties(input_ids[:, :cur_len], scores) scores = tf.math.multiply(scores, score_penalties) return scores
10,705
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
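A small sketch of the penalty logic on dummy tensors (same import assumption as above): positive scores of already-generated tokens are divided by the penalty and negative ones are multiplied by it, so both become less likely to be sampled again:

``` python
import tensorflow as tf
from transformers.generation.tf_logits_process import TFRepetitionPenaltyLogitsProcessor

processor = TFRepetitionPenaltyLogitsProcessor(penalty=2.0)

input_ids = tf.constant([[3, 5]])                        # tokens 3 and 5 were already generated
scores = tf.constant([[1.0, 1.0, 1.0, 2.0, 1.0, -2.0]])  # dummy logits over a 6-token vocabulary
penalized = processor(input_ids, scores, cur_len=2)
print(penalized)  # token 3: 2.0 -> 1.0 (divided), token 5: -2.0 -> -4.0 (multiplied), others unchanged
```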
class TFNoBadWordsLogitsProcessor(TFLogitsProcessor): """ [`TFLogitsProcessor`] that enforces that specified sequences will never be sampled. Args: bad_words_ids (`List[List[int]]`): List of list of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, make sure to set `add_prefix_space=True` when initializing the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The `add_prefix_space` argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours come from `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers). eos_token_id (`int`): The id of the *end-of-sequence* token. """
10,706
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
    def __init__(self, bad_words_ids: List[List[int]], eos_token_id: int):
        if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0:
            raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
        if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
            raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
        if any(
            any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
            for bad_word_ids in bad_words_ids
        ):
            raise ValueError(
                f"Each list in `bad_words_ids` has to be a list of non-negative integers, but is {bad_words_ids}."
            )
10,706
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
        # stores the information about bad words in three tensors:
        # 1. a rectangular tensor with the forbidden sequences (padded with `-1`), for full data comparisons
        self.bad_word_seqs_ids = tf.ragged.constant(bad_words_ids).to_tensor(default_value=-1)
        # 2. a tensor with the unpadded length of each forbidden sequence, for quick length comparisons
        bad_word_seqs_len = [len(bad_words) for bad_words in bad_words_ids]
        if any(word_len == 0 for word_len in bad_word_seqs_len):
            raise ValueError(f"Banned words token sequences {bad_words_ids} cannot contain an empty list")
        self.bad_word_seqs_len = tf.convert_to_tensor(bad_word_seqs_len, dtype=tf.int32)
        # 3. a tensor containing the last token for each sequence, for easy access to the tokens that may be banned
        self.seq_forbidden_tokens = tf.convert_to_tensor([bad_words[-1] for bad_words in bad_words_ids])
10,706
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
    def _calc_row_banned_bad_tokens(self, row_input_ids: tf.Tensor) -> tf.Tensor:
        def _tokens_match(bad_word_seq_number):
            def _len_one():
                # If the bad sequence only has one token, always mask it
                return tf.cond(
                    tf.math.equal(self.bad_word_seqs_len[bad_word_seq_number], 1),
                    lambda: tf.ones((), dtype=tf.bool),
                    _len_greater_than_cur_len,
                )

            def _len_greater_than_cur_len():
                # Otherwise, if the bad sequence is longer than the current length, they can't ever match
                return tf.cond(
                    tf.math.greater(self.bad_word_seqs_len[bad_word_seq_number], tf.shape(row_input_ids)[0]),
                    lambda: tf.zeros((), dtype=tf.bool),
                    _match_found,
                )
10,706
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
            def _match_found():
                # Finally, runs the actual comparison. Can only be called if the previous comparisons do not yield
                # an answer (otherwise we get indexing exceptions)
                compare_len = self.bad_word_seqs_len[bad_word_seq_number] - 1
                return tf.cond(
                    tf.math.reduce_all(
                        tf.math.equal(
                            row_input_ids[-compare_len:], self.bad_word_seqs_ids[bad_word_seq_number, :compare_len]
                        )
                    ),
                    lambda: tf.ones((), dtype=tf.bool),
                    lambda: tf.zeros((), dtype=tf.bool),
                )

            match = _len_one()
            return match
10,706
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
# Compares the current row against all bad word sequences, obtaining a mask with the matches. match_mask = tf.map_fn(_tokens_match, tf.range(self.bad_word_seqs_ids.shape[0]), fn_output_signature=tf.bool) row_banned_tokens = self.seq_forbidden_tokens[match_mask] return row_banned_tokens
10,706
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
        # We want to mask some banned tokens at the score level. Since the banned tokens depend on the previous
        # `input_ids`, they may have a different length for each row, and they may even be empty for some rows.
        # To remain simple and XLA-compatible, we work in a per-row fashion.
        # TODO (Joao): this function might trigger XLA retracing as `cur_len` increases. Fix it if it becomes
        # a frequent choke point. (make `cur_len` a tensor?)
        def _get_row_updated_score(row_inputs: Tuple[tf.Tensor]) -> tf.Tensor:
            row_input_ids, row_score = row_inputs
            banned_tokens = self._calc_row_banned_bad_tokens(row_input_ids[:cur_len])
            banned_tokens_mask = tf.scatter_nd(
                indices=tf.expand_dims(banned_tokens, axis=-1),
                updates=tf.ones_like(banned_tokens, dtype=tf.bool),
                shape=row_score.shape,
            )
10,706
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
row_score = tf.where(banned_tokens_mask, -float("inf"), row_score) return row_score
10,706
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
scores = tf.map_fn(_get_row_updated_score, (input_ids, scores), fn_output_signature=tf.float32) return scores
10,706
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
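A toy sketch of the banning behaviour with illustrative token ids only: the single-token bad word `4` is always banned, while `3` is banned only because the prefix ends with `2`, which would complete the forbidden sequence `[2, 3]`:

``` python
import tensorflow as tf
from transformers.generation.tf_logits_process import TFNoBadWordsLogitsProcessor

processor = TFNoBadWordsLogitsProcessor(bad_words_ids=[[4], [2, 3]], eos_token_id=0)

input_ids = tf.constant([[1, 2]])      # the prefix ends with 2
scores = tf.zeros((1, 6))              # dummy logits over a 6-token vocabulary
masked = processor(input_ids, scores, cur_len=2)
print(masked[0, 3], masked[0, 4])      # both -inf: 3 would complete [2, 3], and 4 is banned outright
```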
class TFNoRepeatNGramLogitsProcessor(TFLogitsProcessor): r""" [`TFLogitsProcessor`] that enforces no repetition of n-grams. See [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345). Args: ngram_size (`int`): All ngrams of size `ngram_size` can only occur once. """ def __init__(self, ngram_size: int): if not isinstance(ngram_size, int) or ngram_size <= 0: raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") self.ngram_size = ngram_size
10,707
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
def calc_banned_ngram_tokens(self, input_ids, num_hypos, cur_len): # Copied from fairseq for no_repeat_ngram in beam_search if cur_len + 1 < self.ngram_size: # return no banned tokens if we haven't generated ngram_size tokens yet return [[] for _ in range(num_hypos)] generated_ngrams = [{} for _ in range(num_hypos)] prev_input_ids = input_ids[:, :cur_len] for idx in range(num_hypos): gen_tokens = prev_input_ids[idx].numpy().tolist() generated_ngram = generated_ngrams[idx] for ngram in zip(*[gen_tokens[i:] for i in range(self.ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
10,707
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
def _get_generated_ngrams(hypo_idx): # Before decoding the next token, prevent decoding of ngrams that have already appeared start_idx = cur_len + 1 - self.ngram_size ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist()) return generated_ngrams[hypo_idx].get(ngram_idx, []) banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)] return banned_tokens def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: # TODO (joao): enable XLA on this logits processor. See discussion and attempts in # https://github.com/huggingface/transformers/pull/16974 if not tf.executing_eagerly(): raise NotImplementedError("TFNoRepeatNGramLogitsProcessor is only implemented for eager execution.") batch_size, vocab_size = scores.shape banned_tokens = self.calc_banned_ngram_tokens(input_ids, batch_size, cur_len)
10,707
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
        # create banned_tokens boolean mask
        banned_tokens_indices_mask = []
        for banned_tokens_slice in banned_tokens:
            banned_tokens_indices_mask.append(
                [token in banned_tokens_slice for token in range(vocab_size)]
            )

        scores = tf.where(tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf"), scores)

        return scores
10,707
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
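A quick eager-mode sketch (the processor is eager-only, as its `__call__` notes) with made-up ids: the bigram `(1, 2)` already occurred and the sequence currently ends in `1`, so token `2` must be blocked:

``` python
import tensorflow as tf
from transformers.generation.tf_logits_process import TFNoRepeatNGramLogitsProcessor

processor = TFNoRepeatNGramLogitsProcessor(ngram_size=2)

input_ids = tf.constant([[1, 2, 3, 1]])  # contains the bigram (1, 2) and ends in 1
scores = tf.zeros((1, 5))                # dummy logits over a 5-token vocabulary
masked = processor(input_ids, scores, cur_len=4)
print(masked[0, 2])                      # -inf: generating 2 would repeat the bigram (1, 2)
```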
class TFForcedBOSTokenLogitsProcessor(TFLogitsProcessor): r""" [`TFLogitsProcessor`] that enforces the specified token as the first generated token. Args: bos_token_id (`int`): The id of the token to force as the first generated token. """ def __init__(self, bos_token_id: int): if bos_token_id < 0: raise ValueError(f"The forced bos token id must be a non-negative integer, got {bos_token_id}") self.bos_token_id = bos_token_id
10,708
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: if cur_len == 1: batch_size, num_tokens = scores.shape # sets the score to 0 in the bos_token_id column scores = tf.zeros((batch_size, 1)) # sets the score to -inf everywhere else if self.bos_token_id > 0: scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.bos_token_id)), scores), axis=-1) if self.bos_token_id < (num_tokens - 1): scores = tf.concat( (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.bos_token_id))), axis=-1, ) return scores
10,708
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
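A short sketch of the forcing behaviour on dummy scores: at `cur_len == 1` the processor rebuilds the score matrix so that only the `bos_token_id` column is 0 and every other column is `-inf`:

``` python
import tensorflow as tf
from transformers.generation.tf_logits_process import TFForcedBOSTokenLogitsProcessor

processor = TFForcedBOSTokenLogitsProcessor(bos_token_id=3)

input_ids = tf.constant([[3], [3]])    # a 1-token prompt per row
scores = tf.random.normal((2, 6))      # dummy logits, batch of 2, 6-token vocabulary
forced = processor(input_ids, scores, cur_len=1)
print(forced)                          # column 3 is 0 in every row, all other columns are -inf
```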
class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor): r""" [`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. Args: max_length (`int`): The maximum length of the sequence to be generated. eos_token_id (`int`): The id of the token to force as the last generated token when `max_length` is reached. """ def __init__(self, max_length: int, eos_token_id: int): self.max_length = max_length if eos_token_id < 0: raise ValueError(f"The forced eos token id must be a non-negative integer, got {eos_token_id}") self.eos_token_id = eos_token_id
10,709
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: if cur_len == self.max_length - 1: batch_size, num_tokens = scores.shape # sets the score to 0 in the eos_token_id column scores = tf.zeros((batch_size, 1)) # sets the score to -inf everywhere else if self.eos_token_id > 0: scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.eos_token_id)), scores), axis=-1) if self.eos_token_id < (num_tokens - 1): scores = tf.concat( (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.eos_token_id))), axis=-1, ) return scores
10,709
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
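The same construction applies at the end of generation; a dummy-tensor sketch with `max_length=4`, so the step that produces the final allowed token forces EOS:

``` python
import tensorflow as tf
from transformers.generation.tf_logits_process import TFForcedEOSTokenLogitsProcessor

processor = TFForcedEOSTokenLogitsProcessor(max_length=4, eos_token_id=2)

input_ids = tf.constant([[0, 7, 7]])   # 3 tokens so far, so the next token is the last one allowed
scores = tf.random.normal((1, 5))      # dummy logits over a 5-token vocabulary
forced = processor(input_ids, scores, cur_len=3)
print(forced)                          # only column 2 (EOS) is 0, every other column is -inf
```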
class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor):
    r"""
    [`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
    generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are
    not sampled at the beginning of the generation.
    """

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
        suppressed_indices = []
        for token in self.begin_suppress_tokens:
            if token < scores.shape[-1]:  # to ensure we don't go beyond the vocab size
                suppressed_indices.extend([[i, token] for i in range(scores.shape[0])])
10,710
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
        if len(suppressed_indices) > 0:
            scores = tf.cond(
                tf.equal(cur_len, self.begin_index),
                lambda: tf.tensor_scatter_nd_update(
                    scores,
                    indices=suppressed_indices,
                    # one `-inf` per (row, token) pair in `suppressed_indices`, so the number of updates still
                    # matches the indices when some tokens were filtered out above for exceeding the vocab size
                    updates=[-float("inf") for _ in range(len(suppressed_indices))],
                ),
                lambda: scores,
            )

        return scores
10,710
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
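A dummy-tensor sketch: with `begin_index=1`, the suppression fires only on the sampling step whose `cur_len` equals `begin_index` and leaves later steps untouched:

``` python
import tensorflow as tf
from transformers.generation.tf_logits_process import TFSuppressTokensAtBeginLogitsProcessor

processor = TFSuppressTokensAtBeginLogitsProcessor(begin_suppress_tokens=[1, 2], begin_index=1)

scores = tf.zeros((1, 5))                                   # dummy logits over a 5-token vocabulary
print(processor(tf.constant([[0]]), scores, cur_len=1))     # columns 1 and 2 are -inf
print(processor(tf.constant([[0, 3]]), scores, cur_len=2))  # unchanged: cur_len != begin_index
```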
class TFSuppressTokensLogitsProcessor(TFLogitsProcessor): r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they are not sampled.""" def __init__(self, suppress_tokens): self.suppress_tokens = list(suppress_tokens) def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: suppressed_indices = [] for token in self.suppress_tokens: if token < scores.shape[-1]: # to ensure we don't go beyond the vocab size suppressed_indices.extend([[i, token] for i in range(scores.shape[0])]) if len(suppressed_indices) > 0: scores = tf.tensor_scatter_nd_update( scores, indices=[[i, token] for i in range(scores.shape[0]) for token in self.suppress_tokens], updates=[-float("inf") for _ in range(scores.shape[0] * len(self.suppress_tokens))], ) return scores
10,711
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
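The unconditional variant in a dummy-tensor sketch: the listed token ids are masked at every step, independently of `cur_len`:

``` python
import tensorflow as tf
from transformers.generation.tf_logits_process import TFSuppressTokensLogitsProcessor

processor = TFSuppressTokensLogitsProcessor(suppress_tokens=[0, 4])

input_ids = tf.constant([[1, 2], [3, 3]])
scores = tf.zeros((2, 6))              # dummy logits, batch of 2, 6-token vocabulary
masked = processor(input_ids, scores, cur_len=2)
print(masked)                          # columns 0 and 4 are -inf in both rows
```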
class TFForceTokensLogitsProcessor(TFLogitsProcessor): r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token indices that will be forced before sampling. The processor will set their log probs to `0` and all other tokens to `-inf` so that they are sampled at their corresponding index."""
10,712
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
    def __init__(self, force_token_map: List[List[int]]):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indices without forced tokens will have a negative value.
        force_token_array = np.ones((max(force_token_map.keys()) + 1), dtype=np.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array[index] = token
        self.force_token_array = tf.convert_to_tensor(force_token_array, dtype=tf.int32)

    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
10,712
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
new_scores = tf.zeros_like(scores, dtype=scores.dtype) + tf.constant([scores.dtype.min]) indices = tf.stack((tf.range(batch_size), tf.tile([current_token], [batch_size])), axis=1) updates = tf.zeros((batch_size,), dtype=scores.dtype) new_scores = tf.tensor_scatter_nd_update(new_scores, indices, updates) return new_scores
10,712
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
scores = tf.cond( tf.greater_equal(cur_len, tf.shape(self.force_token_array)[0]), # If the current length is geq than the length of force_token_array, the processor does nothing. lambda: tf.identity(scores), # Otherwise, it may force a certain token. lambda: tf.cond( tf.greater_equal(self.force_token_array[cur_len], 0), # Only valid (positive) tokens are forced lambda: _force_token(cur_len), # Otherwise, the processor does nothing. lambda: scores, ), ) return scores
10,712
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/tf_logits_process.py
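A dummy-tensor sketch of the forcing map (made-up ids): generation index 1 forces token 3, while index 0 is left unconstrained and is stored internally as -1:

``` python
import tensorflow as tf
from transformers.generation.tf_logits_process import TFForceTokensLogitsProcessor

processor = TFForceTokensLogitsProcessor(force_token_map=[[1, 3]])

input_ids = tf.constant([[0], [0]])
scores = tf.random.normal((2, 6))      # dummy logits, batch of 2, 6-token vocabulary
forced = processor(input_ids, scores, cur_len=1)
print(forced)                          # column 3 is 0 in every row, all other columns hold the dtype minimum
```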
class StoppingCriteria(ABC): """Abstract base class for all stopping criteria that can be applied during generation. If your stopping criteria depends on the `scores` input, make sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`. """ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor: raise NotImplementedError("StoppingCriteria needs to be subclassed")
10,713
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/generation/stopping_criteria.py
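A minimal sketch of subclassing `StoppingCriteria` with a hypothetical criterion that is not part of the library: stop every sequence once a fixed number of tokens has been produced, returning one boolean per batch row to match the `torch.BoolTensor` annotation above:

``` python
import torch
from transformers.generation.stopping_criteria import StoppingCriteria


class MaxTokensStoppingCriteria(StoppingCriteria):
    """Hypothetical example: stop once the generated sequence reaches `max_tokens` tokens."""

    def __init__(self, max_tokens: int):
        self.max_tokens = max_tokens

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
        is_done = input_ids.shape[-1] >= self.max_tokens
        # One flag per sequence in the batch; here they all share the same value.
        return torch.full((input_ids.shape[0],), is_done, dtype=torch.bool, device=input_ids.device)
```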