"""
A simple launcher script for TPU training.

It spawns multiple distributed processes on a TPU host and imports the
training script as a module, similar to torch.distributed.launch.
"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the command line arguments for the launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
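# ---------------------------------------------------------------------------
# Usage sketch (file names are hypothetical): the launcher imports the target
# script as a module and hands its `_mp_fn` entry point to `xmp.spawn`, so the
# training script must define that function. A minimal `train.py` could be:
#
#     def _mp_fn(index):
#         # `index` is the process index assigned by xmp.spawn
#         print(f"Hello from TPU process {index}")
#
# and would be launched with:
#
#     python xla_spawn.py --num_cores 8 train.py --some_training_arg value
#
# Note that `--tpu_num_cores` is appended to sys.argv automatically above.
# ---------------------------------------------------------------------------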
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__(self , lowercase__="</s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__=1_25 , lowercase__=None , **lowercase__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ : Optional[Any] = [f'<extra_id_{i}>' for i in range(lowercase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case_ : int = len(set(filter(lambda lowercase__ : bool("""extra_id""" in str(lowercase__ ) ) , lowercase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
snake_case_ : Union[str, Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else pad_token
snake_case_ : Optional[int] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else eos_token
snake_case_ : Optional[Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else unk_token
super().__init__(
eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , extra_ids=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
snake_case_ : List[str] = extra_ids
snake_case_ : Union[str, Any] = 2**8 # utf is 8 bits
# define special tokens dict
snake_case_ : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
snake_case_ : List[Any] = len(self.special_tokens_encoder )
snake_case_ : Tuple = len(lowercase__ )
for i, token in enumerate(lowercase__ ):
snake_case_ : Union[str, Any] = self.vocab_size + i - n
snake_case_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __UpperCamelCase (self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase__ )) + [1]
return ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
def __UpperCamelCase (self , lowercase__ ):
if len(lowercase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : List[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : Union[str, Any] = self._add_eos_if_not_present(lowercase__ )
if token_ids_a is None:
return token_ids_a
else:
snake_case_ : int = self._add_eos_if_not_present(lowercase__ )
return token_ids_a + token_ids_a
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : List[str] = [chr(lowercase__ ) for i in text.encode("""utf-8""" )]
return tokens
def __UpperCamelCase (self , lowercase__ ):
if token in self.special_tokens_encoder:
snake_case_ : Any = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
snake_case_ : Optional[int] = self.added_tokens_encoder[token]
elif len(lowercase__ ) != 1:
snake_case_ : Dict = self.unk_token_id
else:
snake_case_ : List[str] = ord(lowercase__ ) + self._num_special_tokens
return token_id
def __UpperCamelCase (self , lowercase__ ):
if index in self.special_tokens_decoder:
snake_case_ : Tuple = self.special_tokens_decoder[index]
else:
snake_case_ : Dict = chr(index - self._num_special_tokens )
return token
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[int] = B""""""
for token in tokens:
if token in self.special_tokens_decoder:
snake_case_ : List[Any] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
snake_case_ : Union[str, Any] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
snake_case_ : Optional[int] = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
snake_case_ : Any = token.encode("""utf-8""" )
else:
snake_case_ : Dict = bytes([ord(lowercase__ )] )
bstring += tok_string
snake_case_ : List[Any] = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
return ()
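# ---------------------------------------------------------------------------
# Usage sketch: the tokenizer above is byte-level, so every UTF-8 byte maps to
# exactly one token and the base vocabulary is 256 bytes plus the three special
# tokens and the extra_ids. Assuming the class is exported as `ByT5Tokenizer`,
# encoding should look roughly like:
#
#     tokenizer = ByT5Tokenizer()
#     tokenizer("hello").input_ids
#     # each byte is shifted by _num_special_tokens (3), and build_inputs_with_
#     # special_tokens appends the </s> id (1), e.g. "hello" -> [107, 104, 111, 111, 114, 1]
# ---------------------------------------------------------------------------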
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image.Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
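# ---------------------------------------------------------------------------
# Usage sketch (file name and question are purely illustrative; requires Pillow
# and the Donut checkpoint referenced in default_checkpoint to be downloadable):
#
#     from PIL import Image
#
#     tool = DocumentQuestionAnsweringTool()
#     answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")
#
# Calling the tool runs the encode -> forward -> decode methods above and
# returns only the "answer" field of the parsed Donut output.
# ---------------------------------------------------------------------------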
"""simple docstring"""
import os
def __magic_name__ ( _lowerCamelCase : Dict ):
__a : List[str] = len(grid[0] )
__a : int = len(_lowerCamelCase )
__a : Tuple = 0
__a : List[Any] = 0
__a : List[str] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_lowerCamelCase ):
for j in range(n_rows - 3 ):
__a : List[Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__a : Tuple = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__a : List[Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__a : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__a : str = max(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if max_product > largest:
__a : Optional[Any] = max_product
return largest
def __magic_name__ ( ):
__a : Tuple = []
with open(os.path.dirname(_lowerCamelCase ) + """/grid.txt""" ) as file:
for line in file:
grid.append(line.strip("""\n""" ).split(""" """ ) )
__a : Tuple = [[int(_lowerCamelCase ) for i in grid[j]] for j in range(len(_lowerCamelCase ) )]
return largest_product(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
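# ---------------------------------------------------------------------------
# Worked example for largest_product on a small 4x4 grid:
#
#     grid = [
#         [1, 0, 0, 0],
#         [0, 2, 0, 0],
#         [0, 0, 3, 0],
#         [0, 0, 0, 4],
#     ]
#     largest_product(grid)  # -> 24, the 1 * 2 * 3 * 4 run on the main diagonal
#
# Every horizontal, vertical and anti-diagonal run of four contains a zero here,
# so only the left-to-right diagonal contributes.
# ---------------------------------------------------------------------------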
"""simple docstring"""
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
if not (isinstance(a__ , a__ ) and isinstance(a__ , a__ )):
raise ValueError('longest_common_substring() takes two strings for inputs' )
lowerCAmelCase :str = len(a__ )
lowerCAmelCase :List[Any] = len(a__ )
lowerCAmelCase :List[Any] = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
lowerCAmelCase :List[Any] = 0
lowerCAmelCase :List[Any] = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
lowerCAmelCase :str = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
lowerCAmelCase :Any = i
lowerCAmelCase :Optional[int] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
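# ---------------------------------------------------------------------------
# Example behaviour of the function above (O(len(text1) * len(text2)) time):
#
#     longest_common_substring("programming", "gaming")  # -> "ming"
#     longest_common_substring("abc", "def")             # -> ""
# ---------------------------------------------------------------------------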
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Image-to-text pipeline that predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
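# ---------------------------------------------------------------------------
# Usage sketch through the high-level pipeline factory (the checkpoint name and
# file name are only illustrative):
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("cat.jpg")
#     # -> [{"generated_text": "..."}]
# ---------------------------------------------------------------------------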
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
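# ---------------------------------------------------------------------------
# Usage sketch: both configuration classes above follow the usual
# PretrainedConfig pattern, so a scaled-down config for experimentation could be
# built as, e.g.
#
#     config = RwkvConfig(vocab_size=1000, context_length=256, hidden_size=512, num_hidden_layers=4)
#
# attention_hidden_size and intermediate_size are left at None here, so they
# fall back to hidden_size and 4 * hidden_size as defined in __init__.
# ---------------------------------------------------------------------------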
"""Simulation of the BB84 quantum key distribution protocol."""
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Perform the BB84 protocol and return a key of `key_len` bits."""
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
UpperCAmelCase = [8, 5, 9, 7]
UpperCAmelCase = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
UpperCAmelCase = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]]
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
def _UpperCamelCase ( self : Union[str, Any] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _UpperCamelCase ( self : Any ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _UpperCamelCase ( self : Tuple ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__UpperCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}
def _UpperCamelCase ( self : str , **__UpperCamelCase : Union[str, Any] ) -> None:
_UpperCamelCase = self.__need()
_UpperCamelCase = self.__allocated_resources_table
_UpperCamelCase = self.__available_resources()
_UpperCamelCase = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
while need_list:
_UpperCamelCase = False
for each_need in need_list:
_UpperCamelCase = True
for index, need in enumerate(__UpperCamelCase ):
if need > available_resources[index]:
_UpperCamelCase = False
break
if execution:
_UpperCamelCase = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_UpperCamelCase = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(__UpperCamelCase )
# update available/freed resources stack
_UpperCamelCase = np.array(__UpperCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(__UpperCamelCase ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(__UpperCamelCase ) + 1}'''
+ ''' '''.join(F'''{it:>8}''' for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(__UpperCamelCase ) + 1}'''
+ ''' '''.join(F'''{it:>8}''' for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(__UpperCamelCase ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(__UpperCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
def lowerCamelCase_ ( _a : str , _a : list[str] | None = None , _a : dict[str, float] | None = None , _a : bool = False , ):
'''simple docstring'''
UpperCAmelCase_ : int = cipher_alphabet or [chr(_a ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
UpperCAmelCase_ : int = {
"""a""": 0.0_8_4_9_7,
"""b""": 0.0_1_4_9_2,
"""c""": 0.0_2_2_0_2,
"""d""": 0.0_4_2_5_3,
"""e""": 0.1_1_1_6_2,
"""f""": 0.0_2_2_2_8,
"""g""": 0.0_2_0_1_5,
"""h""": 0.0_6_0_9_4,
"""i""": 0.0_7_5_4_6,
"""j""": 0.0_0_1_5_3,
"""k""": 0.0_1_2_9_2,
"""l""": 0.0_4_0_2_5,
"""m""": 0.0_2_4_0_6,
"""n""": 0.0_6_7_4_9,
"""o""": 0.0_7_5_0_7,
"""p""": 0.0_1_9_2_9,
"""q""": 0.0_0_0_9_5,
"""r""": 0.0_7_5_8_7,
"""s""": 0.0_6_3_2_7,
"""t""": 0.0_9_3_5_6,
"""u""": 0.0_2_7_5_8,
"""v""": 0.0_0_9_7_8,
"""w""": 0.0_2_5_6_0,
"""x""": 0.0_0_1_5_0,
"""y""": 0.0_1_9_9_4,
"""z""": 0.0_0_0_7_7,
}
else:
# Custom frequencies dictionary
UpperCAmelCase_ : Tuple = frequencies_dict
if not case_sensitive:
UpperCAmelCase_ : Any = ciphertext.lower()
# Chi squared statistic values
UpperCAmelCase_ : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(_a ) ):
UpperCAmelCase_ : Optional[int] = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCAmelCase_ : Any = (alphabet_letters.index(letter.lower() ) - shift) % len(
_a )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCAmelCase_ : int = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCAmelCase_ : Tuple = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
UpperCAmelCase_ : int = decrypted_with_shift.lower().count(_a )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
UpperCAmelCase_ : Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCAmelCase_ : List[Any] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
UpperCAmelCase_ : List[str] = decrypted_with_shift.count(_a )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
UpperCAmelCase_ : Dict = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCAmelCase_ : int = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCAmelCase_ : List[str] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
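# ---------------------------------------------------------------------------
# Usage sketch (checkpoint and file names are illustrative; any CLIP-style
# model exposing logits_per_image works with the pipeline above):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.jpg", candidate_labels=["cat", "dog", "car"])
#     # -> [{"score": ..., "label": "cat"}, ...] sorted by descending score
# ---------------------------------------------------------------------------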
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights into the VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
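# ---------------------------------------------------------------------------
# Usage sketch: through the accelerate CLI this module is normally reached as a
# subcommand, e.g.
#
#     accelerate test --config_file path/to/default_config.yaml
#
# which launches test_utils/scripts/test_script.py via accelerate-launch with
# the given config; without --config_file the default cached config is used.
# ---------------------------------------------------------------------------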
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def _SCREAMING_SNAKE_CASE () -> Generator[int, None, None]:
'''simple docstring'''
lowercase_ = {}
lowercase_ = 2
while True:
lowercase_ = factor_map.pop(__lowerCAmelCase , __lowerCAmelCase )
if factor:
lowercase_ = factor + prime
while x in factor_map:
x += factor
lowercase_ = factor
else:
lowercase_ = prime
yield prime
prime += 1
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 1E10 ) -> int:
'''simple docstring'''
lowercase_ = sieve()
lowercase_ = 1
while True:
lowercase_ = next(__lowerCAmelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(__lowerCAmelCase )
n += 2
if __name__ == "__main__":
print(solution())
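# ---------------------------------------------------------------------------
# Why only 2 * prime * n is checked: expanding (p - 1)**n + (p + 1)**n with the
# binomial theorem and reducing modulo p**2, every term containing p**2
# vanishes, leaving 2 for even n and 2 * n * p for odd n. The remainder can
# therefore only exceed the limit for odd n, which is why the loop above skips
# every other prime and advances n by 2.
# ---------------------------------------------------------------------------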
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Split the source data column-wise into one list per attribute."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise each column, inverting it when its weight is 0."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            raise ValueError(f"Invalid weight of {weight:f} provided")

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
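# ---------------------------------------------------------------------------
# Usage sketch: a weight of 0 marks a column where lower values are better and a
# weight of 1 marks a column where higher values are better (values are min-max
# normalised per column). The data below is purely illustrative:
#
#     vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#     procentual_proximity(vehicles, [0, 0, 1])
#     # each inner list gets its combined score appended as a new last element
# ---------------------------------------------------------------------------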
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A__ ( A__ ):
"""simple docstring"""
_lowercase = (DPMSolverSinglestepScheduler,)
_lowercase = (('num_inference_steps', 2_5),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple=0 , **lowerCamelCase__ : Union[str, Any] ):
a__ : Optional[Any] = dict(self.forward_default_kwargs )
a__ : Any = kwargs.pop("num_inference_steps" , lowerCamelCase__ )
a__ : Any = self.dummy_sample
a__ : int = 0.1 * sample
a__ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a__ : Dict = self.get_scheduler_config(**lowerCamelCase__ )
a__ : Any = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
a__ : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
a__ : List[str] = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
a__ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
a__, a__ : Any = sample, sample
for t in range(lowerCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
a__ : List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
a__ : List[str] = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCamelCase( self : Any ):
pass
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[Any]=0 , **lowerCamelCase__ : Optional[int] ):
a__ : Tuple = dict(self.forward_default_kwargs )
a__ : Tuple = kwargs.pop("num_inference_steps" , lowerCamelCase__ )
a__ : Union[str, Any] = self.dummy_sample
a__ : Tuple = 0.1 * sample
a__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a__ : Dict = self.get_scheduler_config()
a__ : List[str] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
a__ : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
a__ : int = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
a__ : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
a__ : List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
a__ : Dict = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Any=None , **lowerCamelCase__ : Union[str, Any] ):
if scheduler is None:
a__ : Union[str, Any] = self.scheduler_classes[0]
a__ : Optional[Any] = self.get_scheduler_config(**lowerCamelCase__ )
a__ : str = scheduler_class(**lowerCamelCase__ )
a__ : List[Any] = self.scheduler_classes[0]
a__ : int = self.get_scheduler_config(**lowerCamelCase__ )
a__ : Any = scheduler_class(**lowerCamelCase__ )
a__ : Any = 10
a__ : int = self.dummy_model()
a__ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : List[str] = model(lowerCamelCase__ , lowerCamelCase__ )
a__ : Any = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
def _UpperCamelCase( self : str ):
a__ : str = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
a__ : Optional[Any] = 50
a__ : List[str] = self.dummy_model()
a__ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
a__ : List[str] = model(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def _UpperCamelCase( self : Union[str, Any] ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
a__ : Tuple = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
a__ : int = self.full_loop(scheduler=lowerCamelCase__ )
a__ : str = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
a__ : Tuple = DEISMultistepScheduler.from_config(scheduler.config )
a__ : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
a__ : int = UniPCMultistepScheduler.from_config(scheduler.config )
a__ : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
a__ : Optional[Any] = self.full_loop(scheduler=lowerCamelCase__ )
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _UpperCamelCase( self : Union[str, Any] ):
self.check_over_configs(thresholding=lowerCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase__ , prediction_type=lowerCamelCase__ , sample_max_value=lowerCamelCase__ , algorithm_type="dpmsolver++" , solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , )
def _UpperCamelCase( self : List[str] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def _UpperCamelCase( self : List[str] ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
a__ : Dict = self.full_loop(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers"
def _UpperCamelCase( self : str ):
self.check_over_configs(lower_order_final=lowerCamelCase__ )
self.check_over_configs(lower_order_final=lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _UpperCamelCase( self : Union[str, Any] ):
self.check_over_configs(variance_type=lowerCamelCase__ )
self.check_over_configs(variance_type="learned_range" )
def _UpperCamelCase( self : Any ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowerCamelCase__ , time_step=0 )
def _UpperCamelCase( self : Optional[int] ):
a__ : Optional[int] = self.full_loop()
a__ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _UpperCamelCase( self : str ):
a__ : List[str] = self.full_loop(use_karras_sigmas=lowerCamelCase__ )
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def _UpperCamelCase( self : int ):
a__ : List[Any] = self.full_loop(prediction_type="v_prediction" )
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def _UpperCamelCase( self : Tuple ):
a__ : Any = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=lowerCamelCase__ )
a__ : List[str] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def _UpperCamelCase( self : str ):
a__ : Union[str, Any] = self.scheduler_classes[0]
a__ : List[str] = self.get_scheduler_config(thresholding=lowerCamelCase__ , dynamic_thresholding_ratio=0 )
a__ : int = scheduler_class(**lowerCamelCase__ )
a__ : int = 10
a__ : int = self.dummy_model()
a__ : Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : Dict = model(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
        assert sample.dtype == torch.float16
import inspect
import unittest
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Any:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCamelCase_ ( self ) -> List[str]:
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # The pip package name differs from the import name for these backends.
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
from math import factorial
def solution(num: int = 100) -> int:
    return sum(int(digit) for digit in str(factorial(num)))
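# Example: solution(10) == 27, since 10! == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.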
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
a__ : List[str] = """docs/source/en/_toctree.yml"""
def A__ ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = defaultdict(__lowerCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
_lowerCAmelCase = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase = []
for duplicate_key in duplicates:
_lowerCAmelCase = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__lowerCamelCase ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__lowerCamelCase, key=lambda __lowerCamelCase : s["title"].lower() )
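# Illustrative behaviour (hypothetical input): two entries that share local="bert" and the
# same title collapse into a single entry, and the cleaned list is returned sorted by title.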
def check_model_doc(overwrite=False):
    """Checks (and optionally fixes) the model section of the table of content."""
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']

    # Clean each modality sub-section one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
def binary_count_setbits(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
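# Example: 25 is 0b11001 in binary, so binary_count_setbits(25) returns 3.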
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, and return it with the surrounding lines."""
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as markdown links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally update) the auto-generated model list of a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
else:
raise ValueError(
f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ : int = MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase_ : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING
def A_ ( self : Tuple ) -> int:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def A_ ( self : int ) -> Optional[int]:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
A = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1E-0_5, 'token': 38_015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-0_5, 'token': 25_506, 'token_str': ' accuser'},
] , )
A = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-0_5,
'token': 38_015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-0_5,
'token': 25_506,
'token_str': ' accuser',
},
] , )
A = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2E-0_5, 'token': 13_606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-0_5, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-0_5, 'token': 2_941, 'token_str': ' Te'},
] , )
@require_torch
def A_ ( self : str ) -> int:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
A = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2E-0_5, 'token': 35_676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-0_5, 'token': 16_416, 'token_str': 'ELS'},
] , )
A = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-0_5,
'token': 35_676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-0_5, 'token': 16_416, 'token_str': 'ELS'},
] , )
A = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1E-0_5, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-0_5, 'token': 2_941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-0_5, 'token': 13_606, 'token_str': ' Clara'},
] , )
A = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
[
{
'score': 2.2E-0_5,
'token': 35_676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-0_5, 'token': 16_416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-0_5,
'token': 35_676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-0_5, 'token': 16_416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def A_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
A = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
A = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(snake_case , snake_case )
@slow
@require_torch
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(snake_case )
@slow
@require_tf
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(snake_case )
def A_ ( self : Dict , snake_case : List[Any] ) -> Optional[int]:
'''simple docstring'''
A = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(snake_case ) , [
{'sequence': 'My name is John', 'score': 0.008, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 1_573, 'token_str': ' Chris'},
] , )
A = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(snake_case ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 2_201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 12_790,
'token_str': ' Lyon',
},
] , )
A = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case ) , [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 13_606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 2_941, 'token_str': ' Te'},
] , )
@require_torch
def A_ ( self : List[str] ) -> Any:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
A = None
A = None
self.run_pipeline_test(snake_case , [] )
@require_tf
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
A = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
A = None
A = None
self.run_pipeline_test(snake_case , [] )
def A_ ( self : str , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
A = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def A_ ( self : Any , snake_case : Any , snake_case : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A = fill_masker.tokenizer
A = fill_masker.model
A = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
A = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
A = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
snake_case , [
[
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
],
[
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
],
] , )
with self.assertRaises(snake_case ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(snake_case ):
fill_masker('This is' )
self.run_test_top_k(snake_case , snake_case )
self.run_test_targets(snake_case , snake_case )
self.run_test_top_k_targets(snake_case , snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(snake_case , snake_case )
self.fill_mask_with_multiple_masks(snake_case , snake_case )
def A_ ( self : str , snake_case : Any , snake_case : Optional[int] ) -> str:
'''simple docstring'''
A = tokenizer.get_vocab()
A = sorted(vocab.keys() )[:2]
# Pipeline argument
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case , targets=snake_case )
A = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
A = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , snake_case )
A = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(snake_case ) )
# Call argument
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
A = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , snake_case )
A = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(snake_case ) )
# Score equivalence
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
A = [top_mask['token_str'] for top_mask in outputs]
A = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case ) == set(snake_case ):
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case )
A = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
# Raises with invalid
with self.assertRaises(snake_case ):
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(snake_case ):
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(snake_case ):
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
def A_ ( self : Any , snake_case : Optional[Any] , snake_case : int ) -> List[Any]:
'''simple docstring'''
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case , top_k=2 )
A = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case , [
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
] , )
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
def A_ ( self : str , snake_case : List[str] , snake_case : Dict ) -> Tuple:
'''simple docstring'''
A = tokenizer.get_vocab()
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
# top_k=2, ntargets=3
A = sorted(vocab.keys() )[:3]
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=snake_case )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        A = [el['token_str'] for el in sorted(snake_case , key=lambda x: x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case ).issubset(snake_case ):
A = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) )
def A_ ( self : Union[str, Any] , snake_case : Union[str, Any] , snake_case : Dict ) -> int:
'''simple docstring'''
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
A = tokenizer.get_vocab()
# String duplicates + id duplicates
A = sorted(vocab.keys() )[:3]
A = [targets[0], targets[1], targets[0], targets[2], targets[1]]
A = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=snake_case , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(snake_case ) , 3 )
def A_ ( self : str , snake_case : List[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A = FillMaskPipeline(model=snake_case , tokenizer=snake_case )
A = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case , [
[
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
],
[
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
],
[
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
{'sequence': ANY(snake_case ), 'score': ANY(snake_case ), 'token': ANY(snake_case ), 'token_str': ANY(snake_case )},
],
] , )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
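# The `_LazyModule` above defers the heavy framework-specific imports registered in
# `_import_structure` until an attribute such as `RobertaModel` is first accessed.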
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed=None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
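# Illustrative sifting example (made-up values): with alice_basis = [0, 1, 1, 0],
# bob_basis = [0, 0, 1, 0] and measured bits "1011", the bases agree at positions 0, 2 and 3,
# so the sifted key is "111" before it is padded or truncated to key_len.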
if __name__ == "__main__":
print(f"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """Convert a T5X checkpoint into a Flax seq2seq model and save it with `save_pretrained`."""
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
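    # Assumption about the T5X layout: gated-activation checkpoints (e.g. T5 v1.1 / LongT5)
    # store two MLP input projections, `wi_0` and `wi_1`, while plain T5 stores a single `wi`;
    # `split_mlp_wi` selects between the two layouts below.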
if config.model_type == "t5":
UpperCamelCase : Dict = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
UpperCamelCase : List[str] = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase : Optional[int] = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global\'].''' )
# Encoder
for layer_index in range(config.num_layers ):
UpperCamelCase : Tuple = F"""layers_{str(SCREAMING_SNAKE_CASE_ )}"""
# Self-Attention
UpperCamelCase : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
UpperCamelCase : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
UpperCamelCase : str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
UpperCamelCase : Any = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
UpperCamelCase : Any = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
UpperCamelCase : str = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCamelCase : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCamelCase : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCamelCase : List[Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCamelCase : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
UpperCamelCase : Union[str, Any] = flax_model.params['''encoder''']['''block'''][str(SCREAMING_SNAKE_CASE_ )]['''layer''']
UpperCamelCase : str = tax_attention_key
UpperCamelCase : int = tax_attention_out
UpperCamelCase : Dict = tax_attention_query
UpperCamelCase : Dict = tax_attention_value
UpperCamelCase : int = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase : List[str] = tax_global_layer_norm
if split_mlp_wi:
UpperCamelCase : Tuple = tax_mlp_wi_a
UpperCamelCase : Optional[Any] = tax_mlp_wi_a
else:
UpperCamelCase : Optional[Any] = tax_mlp_wi
UpperCamelCase : Tuple = tax_mlp_wo
UpperCamelCase : Dict = tax_mlp_layer_norm
UpperCamelCase : Tuple = flax_model_encoder_layer_block
# Only for layer 0:
UpperCamelCase : Any = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCamelCase : Any = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase : List[str] = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
UpperCamelCase : Optional[Any] = tax_encoder_global_rel_embedding
# Assigning
UpperCamelCase : Any = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
UpperCamelCase : List[Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
UpperCamelCase : Any = F"""layers_{str(SCREAMING_SNAKE_CASE_ )}"""
# Self-Attention
UpperCamelCase : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
UpperCamelCase : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
UpperCamelCase : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
UpperCamelCase : str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
UpperCamelCase : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
UpperCamelCase : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
UpperCamelCase : Tuple = tax_enc_dec_attention_module['''key''']['''kernel''']
UpperCamelCase : Dict = tax_enc_dec_attention_module['''out''']['''kernel''']
UpperCamelCase : Union[str, Any] = tax_enc_dec_attention_module['''query''']['''kernel''']
UpperCamelCase : Optional[Any] = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
UpperCamelCase : int = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
UpperCamelCase : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCamelCase : int = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCamelCase : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCamelCase : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCamelCase : int = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
UpperCamelCase : int = flax_model.params['''decoder''']['''block'''][str(SCREAMING_SNAKE_CASE_ )]['''layer''']
UpperCamelCase : str = tax_attention_key
UpperCamelCase : str = tax_attention_out
UpperCamelCase : Union[str, Any] = tax_attention_query
UpperCamelCase : List[Any] = tax_attention_value
UpperCamelCase : int = tax_pre_attention_layer_norm
UpperCamelCase : Union[str, Any] = tax_enc_dec_attention_key
UpperCamelCase : Union[str, Any] = tax_enc_dec_attention_out
UpperCamelCase : Optional[int] = tax_enc_dec_attention_query
UpperCamelCase : Union[str, Any] = tax_enc_dec_attention_value
UpperCamelCase : List[str] = tax_cross_layer_norm
if split_mlp_wi:
UpperCamelCase : str = tax_mlp_wi_a
UpperCamelCase : Optional[Any] = tax_mlp_wi_a
else:
UpperCamelCase : List[str] = tax_mlp_wi
UpperCamelCase : Tuple = tax_mlp_wo
        UpperCamelCase : int = tax_mlp_layer_norm
UpperCamelCase : List[Any] = flax_model_decoder_layer_block
# Decoder Normalization
UpperCamelCase : Optional[Any] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
UpperCamelCase : Optional[Any] = txa_decoder_norm
# Only for layer 0:
UpperCamelCase : int = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCamelCase : Union[str, Any] = tax_decoder_rel_embedding
# Token Embeddings
UpperCamelCase : int = tax_model['''target''']['''token_embedder''']['''embedding''']
UpperCamelCase : int = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCamelCase : List[str] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
    print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
__UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
__UpperCAmelCase : Optional[Any] = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__magic_name__ = 16
__magic_name__ = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
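# Illustrative usage (argument values assumed): given an `Accelerator` instance,
# `get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased")` returns the
# padded MRPC train/eval dataloaders that are later passed to `accelerator.prepare`.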
def training_function(config, args):
# Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : List[str] = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase , return_dict=lowerCamelCase)
# Instantiate optimizer
A_ : Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A_ : str = optimizer_cls(params=model.parameters() , lr=lowerCamelCase)
if accelerator.state.deepspeed_plugin is not None:
A_ : Optional[int] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
A_ : Tuple = 1
A_ : Any = (len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A_ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=0 , num_training_steps=lowerCamelCase , )
else:
A_ : Dict = DummyScheduler(lowerCamelCase , total_num_steps=lowerCamelCase , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Tuple = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# We need to keep track of how many total steps we have iterated over
A_ : List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
A_ : str = 0
# Now we train the model
A_ : Dict = evaluate.load("""glue""" , """mrpc""")
A_ : str = 0
A_ : str = {}
for epoch in range(lowerCamelCase , lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : str = outputs.loss
A_ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
A_ : Optional[int] = 0
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : List[str] = model(**lowerCamelCase)
A_ : Dict = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once, than multiple times
A_ , A_ : str = accelerator.gather(
(predictions, batch["""labels"""])) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCamelCase) - 1:
A_ : Dict = predictions[: len(eval_dataloader.dataset) - samples_seen]
A_ : Tuple = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
A_ : Tuple = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
A_ : Optional[Any] = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""") , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase)
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
parser.add_argument(
"""--model_name_or_path""" , type=lowerCamelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase , )
parser.add_argument(
"""--output_dir""" , type=lowerCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=lowerCamelCase , default=lowerCamelCase , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCamelCase , default=3 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
'''simple docstring'''
def topological_sort(graph):
    """Perform Kahn's algorithm on an adjacency-list graph and print a topological order."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("""Cycle exists""")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
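# Illustrative output for the adjacency list above: one valid ordering is [0, 1, 2, 3, 4, 5];
# a graph containing a cycle would print "Cycle exists" instead.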
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("""Inductance cannot be 0 or negative""")
    elif capacitance <= 0:
        raise ValueError("""Capacitance cannot be 0 or negative""")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
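# Worked example (illustrative): with inductance = 10e-3 H and capacitance = 5e-6 F,
# 1 / (2 * pi * sqrt(10e-3 * 5e-6)) is roughly 711.76 Hz.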
if __name__ == "__main__":
import doctest
doctest.testmod()
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
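# Illustrative checks: is_prime(29) is True, while is_prime(35) is False because 35 == 5 * 7.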
def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"""{solution() = }""")
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
'''simple docstring'''
    config = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCAmelCase_= 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
UpperCAmelCase_= 4
UpperCAmelCase_= 48
UpperCAmelCase_= """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCAmelCase_= [6, 6, 6, 6]
UpperCAmelCase_= 60
UpperCAmelCase_= [6, 6, 6, 6]
UpperCAmelCase_= """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCAmelCase_= 4
UpperCAmelCase_= """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
UpperCAmelCase_= 1
UpperCAmelCase_= 1
UpperCAmelCase_= 1_26
UpperCAmelCase_= 7
UpperCAmelCase_= 255.0
UpperCAmelCase_= """"""
return config
def rename_key(name, config):
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
UpperCAmelCase_= name.replace("""patch_embed.proj""" ,"""embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
UpperCAmelCase_= name.replace("""patch_embed.norm""" ,"""embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
UpperCAmelCase_= name.replace("""layers""" ,"""encoder.stages""" )
if "residual_group.blocks" in name:
UpperCAmelCase_= name.replace("""residual_group.blocks""" ,"""layers""" )
if "attn.proj" in name:
UpperCAmelCase_= name.replace("""attn.proj""" ,"""attention.output.dense""" )
if "attn" in name:
UpperCAmelCase_= name.replace("""attn""" ,"""attention.self""" )
if "norm1" in name:
UpperCAmelCase_= name.replace("""norm1""" ,"""layernorm_before""" )
if "norm2" in name:
UpperCAmelCase_= name.replace("""norm2""" ,"""layernorm_after""" )
if "mlp.fc1" in name:
UpperCAmelCase_= name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
if "mlp.fc2" in name:
UpperCAmelCase_= name.replace("""mlp.fc2""" ,"""output.dense""" )
if "q_bias" in name:
UpperCAmelCase_= name.replace("""q_bias""" ,"""query.bias""" )
if "k_bias" in name:
UpperCAmelCase_= name.replace("""k_bias""" ,"""key.bias""" )
if "v_bias" in name:
UpperCAmelCase_= name.replace("""v_bias""" ,"""value.bias""" )
if "cpb_mlp" in name:
UpperCAmelCase_= name.replace("""cpb_mlp""" ,"""continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
UpperCAmelCase_= name.replace("""patch_embed.proj""" ,"""patch_embed.projection""" )
if name == "norm.weight":
UpperCAmelCase_= """layernorm.weight"""
if name == "norm.bias":
UpperCAmelCase_= """layernorm.bias"""
if "conv_first" in name:
UpperCAmelCase_= name.replace("""conv_first""" ,"""first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCAmelCase_= name.replace("""conv_last""" ,"""final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCAmelCase_= name.replace("""conv_before_upsample.0""" ,"""conv_before_upsample""" )
if "upsample.0" in name:
UpperCAmelCase_= name.replace("""upsample.0""" ,"""upsample.convolution_0""" )
if "upsample.2" in name:
UpperCAmelCase_= name.replace("""upsample.2""" ,"""upsample.convolution_1""" )
UpperCAmelCase_= """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
UpperCAmelCase_= name.replace("""upsample.0.weight""" ,"""upsample.conv.weight""" )
UpperCAmelCase_= name.replace("""upsample.0.bias""" ,"""upsample.conv.bias""" )
else:
pass
else:
UpperCAmelCase_= """swin2sr.""" + name
return name
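# Illustrative trace (constructed example): "layers.0.residual_group.blocks.1.attn.proj.weight"
# becomes "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight" after the renames above.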
def convert_state_dict(orig_state_dict, config):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_= orig_state_dict.pop(lowerCAmelCase_ )
if "qkv" in key:
UpperCAmelCase_= key.split(""".""" )
UpperCAmelCase_= int(key_split[1] )
UpperCAmelCase_= int(key_split[4] )
UpperCAmelCase_= config.embed_dim
if "weight" in key:
UpperCAmelCase_= val[:dim, :]
UpperCAmelCase_= val[dim : dim * 2, :]
UpperCAmelCase_= val[-dim:, :]
else:
UpperCAmelCase_= val[:dim]
UpperCAmelCase_= val[dim : dim * 2]
UpperCAmelCase_= val[-dim:]
pass
else:
UpperCAmelCase_= val
return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
'''simple docstring'''
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")
    state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(missing_keys))
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"""Unexpected key {key} in state_dict""" )
# verify values
UpperCAmelCase_= """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
UpperCAmelCase_= Image.open(requests.get(lowerCAmelCase_ ,stream=lowerCAmelCase_ ).raw ).convert("""RGB""" )
UpperCAmelCase_= SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
UpperCAmelCase_= 1_26 if """Jpeg""" in checkpoint_url else 2_56
UpperCAmelCase_= Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
UpperCAmelCase_= transforms(lowerCAmelCase_ ).unsqueeze(0 )
if config.num_channels == 1:
UpperCAmelCase_= pixel_values[:, 0, :, :].unsqueeze(1 )
UpperCAmelCase_= model(lowerCAmelCase_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
UpperCAmelCase_= torch.Size([1, 3, 5_12, 5_12] )
UpperCAmelCase_= torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCAmelCase_= torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase_= torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
UpperCAmelCase_= torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase_= torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCAmelCase_= torch.Size([1, 3, 5_12, 5_12] )
UpperCAmelCase_= torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCAmelCase_= torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase_= torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] ,lowerCAmelCase_ ,atol=1E-3 )
print("""Looks ok!""" )
UpperCAmelCase_= {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
UpperCAmelCase_= url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
model.push_to_hub(F"""caidas/{model_name}""" )
processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__A = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
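# Usage sketch (illustrative addition, not part of the conversion script): once a
# checkpoint has been converted and pushed as above, it can be loaded for
# super-resolution inference. The repo id below assumes the classical x2 checkpoint
# was pushed to the hub, and the class names follow the upstream `transformers` API
# for Swin2SR.
#
# from PIL import Image
# import requests
# import torch
# from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor
#
# url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
# image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
# processor = Swin2SRImageProcessor()
# model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
# with torch.no_grad():
#     outputs = model(processor(image, return_tensors="pt").pixel_values)
# print(outputs.reconstruction.shape)  # (1, 3, 2 * H, 2 * W) for the x2 model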
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")
    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2
    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor
        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.
        # We can use the hare's position as the new seed; this is what
        # Richard Brent's "optimized" variant of the algorithm does.
        seed = hare
        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1
    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(f'''{args.num} = {divisor} * {quotient}''')
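    # Quick self-checks (a minimal illustrative sketch): even inputs short-circuit to 2,
    # primes exhaust every attempt and return None, and odd composites usually return one
    # nontrivial factor (which factor depends on the seed and step, so only divisibility
    # is checked here).
    assert pollard_rho(100) == 2
    assert pollard_rho(17) is None
    factor = pollard_rho(17**3)
    assert factor is None or (17**3) % factor == 0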
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    # Return the maximum sum obtainable from non-adjacent elements of ``nums``.
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
import doctest
doctest.testmod()
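    # Worked example (illustrative addition): for [1, 2, 3, 4] the best choice of
    # non-adjacent elements is 2 + 4 = 6, and skipping every other element is not
    # always optimal, e.g. [5, 1, 1, 5] gives 5 + 5 = 10.
    assert maximum_non_adjacent_sum([1, 2, 3, 4]) == 6
    assert maximum_non_adjacent_sum([5, 1, 1, 5]) == 10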
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A = ""
_A = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_A = None # compression type in fsspec. ex: "gzip"
_A = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self: Union[str, Any], lowerCamelCase_: str = "", lowerCamelCase_: Optional[str] = None, lowerCamelCase_: Optional[dict] = None, **lowerCamelCase_: int ):
super().__init__(self, **lowerCamelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowercase__ : Dict = fsspec.open(
lowerCamelCase_, mode='rb', protocol=lowerCamelCase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
lowercase__ : List[Any] = os.path.basename(self.file.path.split('::' )[0] )
lowercase__ : Optional[int] = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
lowercase__ : str = None
@classmethod
def snake_case__( cls: Optional[int], lowerCamelCase_: List[Any] ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowerCamelCase_ ).lstrip('/' )
def snake_case__( self: Union[str, Any] ):
if self.dir_cache is None:
lowercase__ : Optional[Any] = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
lowercase__ : Tuple = {f['name']: f}
def snake_case__( self: int, lowerCamelCase_: str ):
return self.file.open().read()
def snake_case__( self: Optional[Any], lowerCamelCase_: str, lowerCamelCase_: str = "rb", lowerCamelCase_: Optional[int]=None, lowerCamelCase_: Union[str, Any]=True, lowerCamelCase_: Optional[Any]=None, **lowerCamelCase_: Tuple, ):
lowercase__ : List[str] = self._strip_protocol(lowerCamelCase_ )
if mode != "rb":
raise ValueError(F"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
return self.file.open()
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A = "bz2"
_A = "bz2"
_A = ".bz2"
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A = "gzip"
_A = "gzip"
_A = ".gz"
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A = "lz4"
_A = "lz4"
_A = ".lz4"
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A = "xz"
_A = "xz"
_A = ".xz"
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A = "zstd"
_A = "zstd"
_A = ".zst"
def __init__( self: Union[str, Any], lowerCamelCase_: str, lowerCamelCase_: str = "rb", lowerCamelCase_: Optional[str] = None, lowerCamelCase_: Optional[dict] = None, lowerCamelCase_: int = DEFAULT_BLOCK_SIZE, **lowerCamelCase_: List[str], ):
super().__init__(
fo=lowerCamelCase_, mode=lowerCamelCase_, target_protocol=lowerCamelCase_, target_options=lowerCamelCase_, block_size=lowerCamelCase_, **lowerCamelCase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowercase__ : Tuple = self.file.__enter__
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self: List[Any], lowerCamelCase_: str ):
lowercase__ : Tuple = file_
def __enter__( self: List[Any] ):
self._file.__enter__()
return self
def __exit__( self: Tuple, *lowerCamelCase_: Optional[Any], **lowerCamelCase_: str ):
self._file.__exit__(*lowerCamelCase_, **lowerCamelCase_ )
def __iter__( self: Tuple ):
return iter(self._file )
def snake_case__( self: Optional[Any] ):
return next(self._file )
def __getattr__( self: int, lowerCamelCase_: List[str] ):
return getattr(self._file, lowerCamelCase_ )
def fixed_enter(*lowerCamelCase_: Union[str, Any], **lowerCamelCase_: Union[str, Any] ):
return WrappedFile(_enter(*lowerCamelCase_, **lowerCamelCase_ ) )
lowercase__ : List[str] = fixed_enter
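# Usage sketch (illustrative, not part of the original module): when these filesystem
# classes are registered with fsspec under their protocol names (as the `datasets`
# library does), a compressed remote file can be read through a chained URL, e.g.:
#
# import fsspec
# with fsspec.open("gzip://file.txt::https://foo.bar/file.txt.gz", "rb") as f:
#     raw_bytes = f.read()
#
# The part before "::" selects the archive protocol and the member name, the part
# after it tells fsspec where the compressed payload itself lives.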
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    # Returns the shortest-path distances from ``src`` to every vertex.
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
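    # Worked example (illustrative addition): for the edge list
    #     [{"src": 0, "dst": 1, "weight": 2},
    #      {"src": 0, "dst": 2, "weight": 4},
    #      {"src": 1, "dst": 2, "weight": 1}]
    # bellman_ford(graph, 3, 3, 0) returns [0.0, 2.0, 3.0]: the two-hop path
    # 0 -> 1 -> 2 (cost 3) beats the direct edge 0 -> 2 (cost 4).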
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path: str):
    """Load a T5X checkpoint and flatten its parameter tree."""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
a_ : List[str] = {}
a_ : str = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
a_ : str = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
a_ : Dict = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
a_ : List[str] = new_key.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
a_ : List[str] = new_key.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
a_ : Union[str, Any] = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , SCREAMING_SNAKE_CASE_ )
a_ : str = new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
a_ : List[Any] = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , SCREAMING_SNAKE_CASE_ )
a_ : Any = flax_dict[key]
a_ : Optional[Any] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
a_ : List[str] = torch.from_numpy(converted_dict[key].T )
else:
a_ : Optional[int] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : List[Any]=False ):
"""simple docstring"""
a_ : Any = get_flax_param(SCREAMING_SNAKE_CASE_ )
if not use_large:
a_ : Union[str, Any] = PixaStructVisionConfig()
a_ : List[Any] = PixaStructTextConfig()
else:
a_ : List[Any] = PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
a_ : Dict = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
a_ : Union[str, Any] = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE_ )
a_ : Any = PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
a_ : Any = rename_and_convert_flax_params(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
a_ : str = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
a_ : Dict = PixaStructImageProcessor()
a_ : List[Any] = PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
if use_large:
a_ : Dict = 40_96
a_ : int = True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
print("""Model saved in {}""".format(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE : Union[str, Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
SCREAMING_SNAKE_CASE : List[Any] = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
SCREAMING_SNAKE_CASE : Any = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def A ( self ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def A ( self , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
"""simple docstring"""
a_ : Optional[int] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
a_ : Dict = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
a_ : Optional[Any] = evaluate(dataset=UpperCamelCase_ , predictions=UpperCamelCase_ )
return score
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__UpperCAmelCase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__UpperCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__UpperCAmelCase = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
__UpperCAmelCase = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def _lowerCamelCase ( A_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] =None
# source code of `config_class`
UpperCamelCase__ : List[str] =inspect.getsource(A_ )
UpperCamelCase__ : Optional[Any] =_re_checkpoint.findall(A_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
UpperCamelCase__ : Optional[Any] =ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCamelCase__ : Dict =f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
UpperCamelCase__ : List[str] =ckpt_name
break
return checkpoint
def _lowerCamelCase ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : List[Any] =[]
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCamelCase__ : Optional[Any] =get_checkpoint_from_config_class(A_ )
UpperCamelCase__ : List[str] =config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(A_ )
if len(A_ ) > 0:
UpperCamelCase__ : Dict ="\n".join(sorted(A_ ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
from math import sqrt
def is_prime(number: int) -> bool:
    """Checks whether ``number`` is prime using trial division up to sqrt(number)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Returns the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
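    # Small illustrative check (added): the first six primes are 2, 3, 5, 7, 11 and 13,
    # so solution(6) returns 13, matching the example in Project Euler problem 7.
    assert solution(6) == 13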
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Infinite, incremental sieve that yields prime numbers one at a time."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
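# Context (added note): this is the Project Euler problem 123 setup. For the nth prime
# p_n and odd n, the remainder of (p_n - 1)**n + (p_n + 1)**n divided by p_n**2 equals
# 2 * p_n * n (and just 2 for even n), so solution() only has to find the first odd n
# with 2 * p_n * n > limit. Per the problem statement, the remainder first exceeds
# 10**9 at n = 7037, so solution(1e9) should return 7037.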
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : str = model.config
A : Tuple = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
A : Any = MBartConfig(
is_decoder=snake_case__ , is_encoder_decoder=snake_case__ , add_cross_attention=snake_case__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=snake_case__ , add_final_layer_norm=snake_case__ , )
return encoder_config, decoder_config
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if "encoder.model" in name:
A : Dict = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
A : Any = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
A : int = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
A : List[Any] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
A : Union[str, Any] = '''encoder.''' + name
if "attn.proj" in name:
A : Tuple = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
A : int = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
A : Optional[Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A : List[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
A : List[str] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
A : List[Any] = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
A : Dict = '''encoder.layernorm.bias'''
return name
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A : Dict = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
A : Any = key.split('''.''' )
A : Dict = int(key_split[3] )
A : int = int(key_split[5] )
A : Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A : List[str] = val[:dim, :]
A : Optional[int] = val[dim : dim * 2, :]
A : str = val[-dim:, :]
else:
A : List[str] = val[:dim]
A : Tuple = val[dim : dim * 2]
A : List[str] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
A : Any = val
return orig_state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__=None , snake_case__=False ):
'''simple docstring'''
A : Optional[int] = DonutModel.from_pretrained(snake_case__ ).eval()
# load HuggingFace model
A, A : Union[str, Any] = get_configs(snake_case__ )
A : int = DonutSwinModel(snake_case__ )
A : Optional[Any] = MBartForCausalLM(snake_case__ )
A : Any = VisionEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ )
model.eval()
A : Any = original_model.state_dict()
A : Union[str, Any] = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# verify results on scanned document
A : List[str] = load_dataset('''hf-internal-testing/example-documents''' )
A : Optional[Any] = dataset['''test'''][0]['''image'''].convert('''RGB''' )
A : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained(snake_case__ , from_slow=snake_case__ )
A : Optional[Any] = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
A : int = DonutProcessor(snake_case__ , snake_case__ )
A : Dict = processor(snake_case__ , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
A : int = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
A : Optional[Any] = '''When is the coffee break?'''
A : Optional[Any] = task_prompt.replace('''{user_input}''' , snake_case__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
A : Any = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
A : str = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
A : Optional[Any] = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
A : List[str] = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
A : List[Any] = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
A : Union[str, Any] = original_model.decoder.tokenizer(snake_case__ , add_special_tokens=snake_case__ , return_tensors='''pt''' )[
'''input_ids'''
]
A : List[str] = original_model.encoder.model.patch_embed(snake_case__ )
A, A : Dict = model.encoder.embeddings(snake_case__ )
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
# verify encoder hidden states
A : List[str] = original_model.encoder(snake_case__ )
A : Optional[Any] = model.encoder(snake_case__ ).last_hidden_state
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-2 )
# verify decoder hidden states
A : Optional[int] = original_model(snake_case__ , snake_case__ , snake_case__ ).logits
A : List[Any] = model(snake_case__ , decoder_input_ids=snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
lowercase : Optional[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
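# Usage sketch (illustrative, with an assumed repo id): after conversion, document VQA
# runs through the standard VisionEncoderDecoderModel generation loop. "image" below
# stands for a PIL document image; the prompt mirrors the docvqa format used above.
#
# from transformers import DonutProcessor, VisionEncoderDecoderModel
#
# processor = DonutProcessor.from_pretrained("nielsr/donut-base-finetuned-docvqa")
# model = VisionEncoderDecoderModel.from_pretrained("nielsr/donut-base-finetuned-docvqa")
# pixel_values = processor(image, return_tensors="pt").pixel_values
# prompt = "<s_docvqa><s_question>When is the coffee break?</s_question><s_answer>"
# decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids
# outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)
# print(processor.batch_decode(outputs)[0])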
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Optional[int] = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class A ( __snake_case ):
__magic_name__ = '''switch_transformers'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , SCREAMING_SNAKE_CASE=32128 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0.01 , SCREAMING_SNAKE_CASE="float32" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.001 , SCREAMING_SNAKE_CASE=0.001 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="relu" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , **SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
A : Union[str, Any] = vocab_size
A : Any = d_model
A : Dict = d_kv
A : Optional[Any] = d_ff
A : List[Any] = num_sparse_encoder_layers
A : List[Any] = num_layers
A : Optional[int] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A : List[str] = num_sparse_decoder_layers
        # This tells us after how many encoder layers a sparse (MoE) layer is inserted.
if self.num_sparse_encoder_layers > 0:
A : int = self.num_layers // self.num_sparse_encoder_layers
else:
A : int = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers a sparse (MoE) layer is inserted.
if self.num_sparse_decoder_layers > 0:
A : str = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
A : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
A : List[Any] = num_heads
A : str = num_experts
A : Dict = expert_capacity
A : Optional[int] = router_bias
A : Any = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
A : Dict = router_dtype
A : int = router_ignore_padding_tokens
A : List[Any] = relative_attention_num_buckets
A : Dict = relative_attention_max_distance
A : Optional[int] = dropout_rate
A : Dict = layer_norm_epsilon
A : Optional[int] = initializer_factor
A : Union[str, Any] = feed_forward_proj
A : Any = use_cache
A : Tuple = add_router_probs
A : List[str] = router_z_loss_coef
A : str = router_aux_loss_coef
A : Union[str, Any] = self.feed_forward_proj.split('''-''' )
A : Optional[Any] = act_info[-1]
A : Union[str, Any] = act_info[0] == '''gated'''
if len(SCREAMING_SNAKE_CASE ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
A : str = '''gelu_new'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Returns a longest non-decreasing subsequence of ``array`` (recursive brute force)."""
    array_length = len(array)
    # If the array contains only one element, we return it
    # (it's the stop condition of the recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
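    # Small illustrative check (added): an already sorted array is its own longest
    # non-decreasing subsequence, while [1, 3, 2] admits two answers of length 2
    # ([1, 3] and [1, 2]); this implementation returns one of them.
    assert longest_subsequence([1, 2, 3]) == [1, 2, 3]
    assert len(longest_subsequence([1, 3, 2])) == 2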
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Returns True if ``pattern`` occurs in ``text`` (Rabin-Karp string search)."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
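    # Small live illustration (added): the rolling-hash update used in rabin_karp()
    # recomputes the hash of "bcd" from the hash of "abc" in O(1) and must agree with
    # hashing "bcd" from scratch.
    _h_abc = (ord("a") * alphabet_size**2 + ord("b") * alphabet_size + ord("c")) % modulus
    _h_bcd = ((_h_abc - ord("a") * alphabet_size**2) * alphabet_size + ord("d")) % modulus
    assert _h_bcd == (ord("b") * alphabet_size**2 + ord("c") * alphabet_size + ord("d")) % modulus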
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __A ( lowerCAmelCase_ = "isbn/0140328726" ):
_UpperCAmelCase : Optional[Any] = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
_UpperCAmelCase : Any = f"{olid} is not a valid Open Library olid"
raise ValueError(lowerCAmelCase_ )
return requests.get(f"https://openlibrary.org/{new_olid}.json" ).json()
def __A ( lowerCAmelCase_ ):
_UpperCAmelCase : int = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
_UpperCAmelCase : int = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_UpperCAmelCase : int = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
_UpperCAmelCase : Any = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Optional[Any] = """, """.join(lowerCAmelCase_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
lowerCAmelCase_ : int = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
lowerCAmelCase_ : str = summarize_book(get_openlibrary_data(F"isbn/{isbn}"))
print('''\n'''.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _snake_case ( A ) -> Optional[Any]:
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class a__ ( a__ ):
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=lowerCamelCase_ , help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCamelCase_ )
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
lowerCAmelCase__ = model
lowerCAmelCase__ = cache
lowerCAmelCase__ = force
lowerCAmelCase__ = trust_remote_code
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowerCamelCase = logging.get_logger(__name__)
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : Optional[int] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
warnings.warn(
'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DeiTImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase)
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]) -> None:
"""simple docstring"""
_lowercase : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(_UpperCamelCase )
self.set_fail_transitions()
    def find_next_state(self, current_state: int, char: str) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
    def add_keyword(self, keyword: str) -> None:
"""simple docstring"""
_lowercase : int = 0
for character in keyword:
_lowercase : Optional[Any] = self.find_next_state(_UpperCamelCase , _UpperCamelCase )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
_lowercase : Any = len(self.adlist ) - 1
else:
_lowercase : Dict = next_state
self.adlist[current_state]["output"].append(_UpperCamelCase )
    def set_fail_transitions(self) -> None:
"""simple docstring"""
_lowercase : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(_UpperCamelCase )
_lowercase : Optional[Any] = 0
while q:
_lowercase : Dict = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(_UpperCamelCase )
_lowercase : Tuple = self.adlist[r]["fail_state"]
while (
self.find_next_state(_UpperCamelCase , self.adlist[child]["value"] ) is None
and state != 0
):
_lowercase : Dict = self.adlist[state]["fail_state"]
_lowercase : Any = self.find_next_state(
_UpperCamelCase , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
_lowercase : List[str] = 0
_lowercase : Dict = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
    def search_in(self, string: str) -> dict[str, list[int]]:
"""simple docstring"""
_lowercase : dict = {} # returns a dict with keywords and list of its occurrences
_lowercase : Optional[Any] = 0
for i in range(len(_UpperCamelCase ) ):
while (
self.find_next_state(_UpperCamelCase , string[i] ) is None
and current_state != 0
):
_lowercase : Union[str, Any] = self.adlist[current_state]["fail_state"]
_lowercase : str = self.find_next_state(_UpperCamelCase , string[i] )
if next_state is None:
_lowercase : int = 0
else:
_lowercase : str = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
_lowercase : Optional[Any] = []
result[key].append(i - len(_UpperCamelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
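    # Usage sketch (illustrative; expected keyword positions worked out by hand):
    # automaton = Automaton(["what", "hat", "ver", "er"])
    # automaton.search_in("whatever, err ... , wherever")
    # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}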
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_A : List[str] ='''examples/'''
_A : Any ={
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
_A : int ={
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
_A : int ='''README.md'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
with open(UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ : List[str] = f.read()
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = REPLACE_PATTERNS[pattern]
lowerCamelCase__ : Dict = replace.replace("""VERSION""" , UpperCamelCase )
lowerCamelCase__ : str = re_pattern.sub(UpperCamelCase , UpperCamelCase )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
for folder, directories, fnames in os.walk(UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(UpperCamelCase , UpperCamelCase ) , UpperCamelCase , pattern="""examples""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> List[Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if not patch:
update_version_in_examples(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ () -> Optional[Any]:
lowerCamelCase__ : Dict = """🤗 Transformers currently provides the following architectures"""
lowerCamelCase__ : Dict = """1. Want to contribute a new model?"""
with open(UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ : int = f.readlines()
# Find the start of the list.
lowerCamelCase__ : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCamelCase__ : Optional[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
lowerCamelCase__ : List[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ () -> Optional[Any]:
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
lowerCamelCase__ : int = f.read()
lowerCamelCase__ : Optional[Any] = REPLACE_PATTERNS["""init"""][0].search(UpperCamelCase ).groups()[0]
return packaging.version.parse(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase=False ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
lowerCamelCase__ : List[str] = default_version.base_version
elif patch:
lowerCamelCase__ : Any = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowerCamelCase__ : List[Any] = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowerCamelCase__ : Any = input(f'''Which version are you releasing? [{default_version}]''' )
if len(UpperCamelCase ) == 0:
lowerCamelCase__ : Optional[int] = default_version
print(f'''Updating version to {version}.''' )
global_version_update(UpperCamelCase , patch=UpperCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def SCREAMING_SNAKE_CASE_ () -> List[str]:
lowerCamelCase__ : Optional[int] = get_version()
lowerCamelCase__ : Any = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowerCamelCase__ : Any = current_version.base_version
# Check with the user we got that right.
lowerCamelCase__ : List[Any] = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(UpperCamelCase ) == 0:
lowerCamelCase__ : Dict = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(UpperCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_A : List[Any] =argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
_A : List[str] =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__UpperCAmelCase : List[str] = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
__UpperCAmelCase : List[str] = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
__UpperCAmelCase : Union[str, Any] = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
return float((preds == labels).mean() )
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels )
f1 = float(f1_score(y_true=labels , y_pred=preds ) )
return {
"accuracy": acc,
"f1": f1,
}
def pearson_and_spearman(preds, labels):
pearson_corr = float(pearsonr(preds, labels )[0] )
spearman_corr = float(spearmanr(preds, labels )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def _compute(self, predictions, references):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(references , predictions )}
elif self.config_name == "stsb":
return pearson_and_spearman(predictions , references )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_f1(predictions , references )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase_ : Tuple = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ : Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase_ : Optional[int] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase_ : Tuple = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase_ : Tuple = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
lowerCAmelCase_ : int = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
lowerCAmelCase_ : Optional[Any] = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
lowerCAmelCase_ : int = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
lowerCAmelCase_ : Dict = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
lowerCAmelCase_ : int = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __SCREAMING_SNAKE_CASE (__A ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__a =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a =DPRContextEncoderTokenizer
class __SCREAMING_SNAKE_CASE (__A ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__a =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a =DPRQuestionEncoderTokenizer
lowerCAmelCase_ : str = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
lowerCAmelCase_ : int = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
lowerCAmelCase_ : Optional[Any] = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(__A )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __call__( self : Optional[int] , __a : int , __a : Optional[str] = None , __a : Optional[str] = None , __a : Union[bool, str] = False , __a : Union[bool, str] = False , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[bool] = None , **__a : Optional[Any] , ):
if titles is None and texts is None:
return super().__call__(
__a , padding=__a , truncation=__a , max_length=__a , return_tensors=__a , return_attention_mask=__a , **__a , )
elif titles is None or texts is None:
_a = titles if texts is None else texts
return super().__call__(
__a , __a , padding=__a , truncation=__a , max_length=__a , return_tensors=__a , return_attention_mask=__a , **__a , )
_a = titles if not isinstance(__a , __a ) else [titles]
_a = texts if not isinstance(__a , __a ) else [texts]
_a = len(__a )
_a = questions if not isinstance(__a , __a ) else [questions] * n_passages
assert len(__a ) == len(
__a ), f'There should be as many titles than texts but got {len(__a )} titles and {len(__a )} texts.'
_a = super().__call__(__a , __a , padding=__a , truncation=__a )['''input_ids''']
_a = super().__call__(__a , add_special_tokens=__a , padding=__a , truncation=__a )['''input_ids''']
_a = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__a , __a )
]
}
if return_attention_mask is not False:
_a = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_a = attention_mask
return self.pad(__a , padding=__a , max_length=__a , return_tensors=__a )
def UpperCamelCase__ ( self : Dict , __a : BatchEncoding , __a : DPRReaderOutput , __a : int = 16 , __a : int = 64 , __a : int = 4 , ):
_a = reader_input['''input_ids''']
_a = reader_output[:3]
_a = len(__a )
_a = sorted(range(__a ) , reverse=__a , key=relevance_logits.__getitem__ )
_a = []
for doc_id in sorted_docs:
_a = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_a = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_a = sequence_ids.index(self.pad_token_id )
else:
_a = len(__a )
_a = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__a , top_spans=__a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__a , start_index=__a , end_index=__a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase__ ( self : Union[str, Any] , __a : List[int] , __a : List[int] , __a : int , __a : int , ):
_a = []
for start_index, start_score in enumerate(__a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_a = sorted(__a , key=lambda __a : x[1] , reverse=__a )
_a = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
_a = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__a ) == top_spans:
break
return chosen_span_intervals
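# Reading note (added commentary, not from the original module): the span search above is
# O(sequence_length * max_answer_length) per passage. Every start index is paired with up to
# `max_answer_length` end positions, candidates are ranked by start_logit + end_logit, and any
# span overlapping an already chosen one is skipped, so the returned intervals never overlap.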
@add_end_docstrings(__A )
class __SCREAMING_SNAKE_CASE (__A , __A ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =READER_PRETRAINED_VOCAB_FILES_MAP
__a =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =READER_PRETRAINED_INIT_CONFIGURATION
__a =['input_ids', 'attention_mask']
__a =DPRReaderTokenizer
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
x_n = x0
x_n1 = x1
while True:
if x_n == x_n1 or function(x_n1) == function(x_n):
raise ZeroDivisionError("float division by zero, could not find root" )
x_n2 = x_n1 - (
function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
)
if abs(x_n2 - x_n1) < 10**-5:
return x_n2
x_n = x_n1
x_n1 = x_n2
def f(x: float) -> float:
return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
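# Sanity check for the secant iteration above (assuming the fixed `intersection`): the cubic
# x**3 - 2*x - 5 changes sign between 2 and 2.1, so the printed root is approximately 2.0946.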
def set_bit(number: int, position: int) -> int:
'''simple docstring'''
return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
'''simple docstring'''
return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
'''simple docstring'''
return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
'''simple docstring'''
return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
'''simple docstring'''
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
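# Quick sanity check of the bit helpers above (values verified by hand):
#   set_bit(0b1101, 1)    -> 0b1111 (15)
#   clear_bit(0b1101, 2)  -> 0b1001 (9)
#   flip_bit(0b1101, 1)   -> 0b1111 (15)
#   is_bit_set(0b1010, 3) -> True
#   get_bit(0b1010, 1)    -> 1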
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : Optional[int] = "PoolFormerConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "sail/poolformer_s12"
UpperCAmelCase__ : Any = [1, 5_12, 7, 7]
# Image classification docstring
UpperCAmelCase__ : List[str] = "sail/poolformer_s12"
UpperCAmelCase__ : Any = "tabby, tabby cat"
UpperCAmelCase__ : Tuple = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
output = input.div(keep_prob ) * random_tensor
return output
class __lowercase ( nn.Module ):
def __init__( self , drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def _a ( self , lowercase_) -> torch.Tensor:
return drop_path(lowercase_ , self.drop_prob , self.training)
def _a ( self) -> str:
return "p={}".format(self.drop_prob)
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> str:
super().__init__()
__snake_case = patch_size if isinstance(lowercase_ , collections.abc.Iterable) else (patch_size, patch_size)
__snake_case = stride if isinstance(lowercase_ , collections.abc.Iterable) else (stride, stride)
__snake_case = padding if isinstance(lowercase_ , collections.abc.Iterable) else (padding, padding)
__snake_case = nn.Convad(lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=lowercase_)
__snake_case = norm_layer(lowercase_) if norm_layer else nn.Identity()
def _a ( self , lowercase_) -> int:
__snake_case = self.projection(lowercase_)
__snake_case = self.norm(lowercase_)
return embeddings
class __lowercase ( nn.GroupNorm ):
def __init__( self , lowercase_ , **lowercase_) -> Dict:
super().__init__(1 , lowercase_ , **lowercase_)
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> Optional[int]:
super().__init__()
__snake_case = nn.AvgPoolad(lowercase_ , stride=1 , padding=pool_size // 2 , count_include_pad=lowercase_)
def _a ( self , lowercase_) -> str:
return self.pool(lowercase_) - hidden_states
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Dict:
super().__init__()
__snake_case = nn.Convad(lowercase_ , lowercase_ , 1)
__snake_case = nn.Convad(lowercase_ , lowercase_ , 1)
__snake_case = PoolFormerDropPath(lowercase_)
if isinstance(config.hidden_act , lowercase_):
__snake_case = ACTaFN[config.hidden_act]
else:
__snake_case = config.hidden_act
def _a ( self , lowercase_) -> int:
__snake_case = self.conva(lowercase_)
__snake_case = self.act_fn(lowercase_)
__snake_case = self.drop(lowercase_)
__snake_case = self.conva(lowercase_)
__snake_case = self.drop(lowercase_)
return hidden_states
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any:
super().__init__()
__snake_case = PoolFormerPooling(lowercase_)
__snake_case = PoolFormerOutput(lowercase_ , lowercase_ , lowercase_ , lowercase_)
__snake_case = PoolFormerGroupNorm(lowercase_)
__snake_case = PoolFormerGroupNorm(lowercase_)
# Useful for training neural nets
__snake_case = PoolFormerDropPath(lowercase_) if drop_path > 0.0 else nn.Identity()
__snake_case = config.use_layer_scale
if config.use_layer_scale:
__snake_case = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowercase_)) , requires_grad=lowercase_)
__snake_case = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowercase_)) , requires_grad=lowercase_)
def _a ( self , lowercase_) -> Dict:
if self.use_layer_scale:
__snake_case = self.pooling(self.before_norm(lowercase_))
__snake_case = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * pooling_output
# First residual connection
__snake_case = hidden_states + self.drop_path(lowercase_)
__snake_case = ()
__snake_case = self.output(self.after_norm(lowercase_))
__snake_case = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * layer_output
# Second residual connection
__snake_case = hidden_states + self.drop_path(lowercase_)
__snake_case = (output,) + outputs
return outputs
else:
__snake_case = self.drop_path(self.pooling(self.before_norm(lowercase_)))
# First residual connection
__snake_case = pooling_output + hidden_states
__snake_case = ()
# Second residual connection inside the PoolFormerOutput block
__snake_case = self.drop_path(self.output(self.after_norm(lowercase_)))
__snake_case = hidden_states + layer_output
__snake_case = (output,) + outputs
return outputs
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> Dict:
super().__init__()
__snake_case = config
# stochastic depth decay rule
__snake_case = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths))]
# patch embeddings
__snake_case = []
for i in range(config.num_encoder_blocks):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ))
__snake_case = nn.ModuleList(lowercase_)
# Transformer blocks
__snake_case = []
__snake_case = 0
for i in range(config.num_encoder_blocks):
# each block consists of layers
__snake_case = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(
PoolFormerLayer(
lowercase_ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio) , drop_path=dpr[cur + j] , ))
blocks.append(nn.ModuleList(lowercase_))
__snake_case = nn.ModuleList(lowercase_)
def _a ( self , lowercase_ , lowercase_=False , lowercase_=True) -> List[str]:
__snake_case = () if output_hidden_states else None
__snake_case = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block)):
__snake_case , __snake_case = layers
# Get patch embeddings from hidden_states
__snake_case = embedding_layer(lowercase_)
# Send the embeddings through the blocks
for _, blk in enumerate(lowercase_):
__snake_case = blk(lowercase_)
__snake_case = layer_outputs[0]
if output_hidden_states:
__snake_case = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_)
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = PoolFormerConfig
__UpperCAmelCase = '''poolformer'''
__UpperCAmelCase = '''pixel_values'''
__UpperCAmelCase = True
def _a ( self , lowercase_) -> List[str]:
if isinstance(lowercase_ , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowercase_ , nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _a ( self , lowercase_ , lowercase_=False) -> int:
if isinstance(lowercase_ , lowercase_):
__snake_case = value
UpperCAmelCase__ : Optional[Any] = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : List[str] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , lowerCamelCase__ , )
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_) -> Optional[Any]:
super().__init__(lowercase_)
__snake_case = config
__snake_case = PoolFormerEncoder(lowercase_)
# Initialize weights and apply final processing
self.post_init()
def _a ( self) -> List[str]:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(lowercase_)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self , lowercase_ = None , lowercase_ = None , lowercase_ = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
__snake_case = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , )
__snake_case = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> List[str]:
super().__init__()
__snake_case = nn.Linear(config.hidden_size , config.hidden_size)
def _a ( self , lowercase_) -> List[Any]:
__snake_case = self.dense(lowercase_)
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , lowerCamelCase__ , )
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_) -> str:
super().__init__(lowercase_)
__snake_case = config.num_labels
__snake_case = PoolFormerModel(lowercase_)
# Final norm
__snake_case = PoolFormerGroupNorm(config.hidden_sizes[-1])
# Classifier head
__snake_case = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.poolformer(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , )
__snake_case = outputs[0]
__snake_case = self.classifier(self.norm(lowercase_).mean([-2, -1]))
__snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case = 'single_label_classification'
else:
__snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
__snake_case = MSELoss()
if self.num_labels == 1:
__snake_case = loss_fct(logits.squeeze() , labels.squeeze())
else:
__snake_case = loss_fct(lowercase_ , lowercase_)
elif self.config.problem_type == "single_label_classification":
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__snake_case = BCEWithLogitsLoss()
__snake_case = loss_fct(lowercase_ , lowercase_)
if not return_dict:
__snake_case = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states)
'''simple docstring'''
from math import factorial
def solution( n = 20 ):
"""simple docstring"""
n = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
k = n // 2
return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
'''simple docstring'''
def remove_digit( num ):
"""simple docstring"""
if not isinstance(num , int ):
raise TypeError('only integers accepted as input' )
else:
num_str = str(abs(num ) )
num_transpositions = [list(num_str ) for _ in range(len(num_str ) )]
for index in range(len(num_str ) ):
num_transpositions[index].pop(index )
return max(
int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job( job ):
"""simple docstring"""
job_info = {}
start = job["started_at"]
end = job["completed_at"]
start_datetime = date_parser.parse(start )
end_datetime = date_parser.parse(end )
duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
job_info["started_at"] = start
job_info["completed_at"] = end
job_info["duration"] = duration_in_min
return job_info
def get_job_time( workflow_run_id , token=None ):
"""simple docstring"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f'Bearer {token}'}
url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
result = requests.get(url , headers=headers ).json()
job_time = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
pages_to_iterate_over = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(pages_to_iterate_over ):
result = requests.get(url + f'&page={i + 2}' , headers=headers ).json()
job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
return job_time
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
args = parser.parse_args()
job_time = get_job_time(args.workflow_run_id)
job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v['duration']}''')
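# Example of the structure printed above (illustrative job names and durations only):
#   run_tests_torch: 42
#   check_code_quality: 7
# where each value is the job duration in minutes, listed in decreasing order.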
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : str = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : List[str] = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__lowerCamelCase : Tuple = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
__lowerCamelCase : Optional[int] = '''▁'''
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
A = BarthezTokenizer
def __init__( self : Optional[int],_A : int=None,_A : List[Any]=None,_A : Union[str, Any]="<s>",_A : Dict="</s>",_A : Union[str, Any]="</s>",_A : Union[str, Any]="<s>",_A : Optional[Any]="<unk>",_A : str="<pad>",_A : Tuple="<mask>",**_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
_A,tokenizer_file=_A,bos_token=_A,eos_token=_A,unk_token=_A,sep_token=_A,cls_token=_A,pad_token=_A,mask_token=_A,**_A,)
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False if not self.vocab_file else True
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self : Tuple,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : List[str],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : str = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file,_A )
return (out_vocab_file,)
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def _a (self ):
'''simple docstring'''
lowerCamelCase = mock.Mock()
lowerCamelCase = 5_00
lowerCamelCase = {}
lowerCamelCase = HTTPError
lowerCamelCase = {}
# Download this model to make sure it's in the cache.
lowerCamelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=__a ) as mock_head:
lowerCamelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _a (self ):
'''simple docstring'''
lowerCamelCase = mock.Mock()
lowerCamelCase = 5_00
lowerCamelCase = {}
lowerCamelCase = HTTPError
lowerCamelCase = {}
# Download this model to make sure it's in the cache.
lowerCamelCase = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=__a ) as mock_head:
lowerCamelCase = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def _a (self ):
'''simple docstring'''
try:
lowerCamelCase = tempfile.mktemp()
with open(__a , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , __a )
lowerCamelCase = AlbertTokenizer.from_pretrained(__a )
finally:
os.remove(__a )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , __a )
lowerCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 10_00 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def _a (self ):
'''simple docstring'''
lowerCamelCase = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
_A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _a (cls ):
'''simple docstring'''
lowerCamelCase = TOKEN
HfFolder.save_token(__a )
@classmethod
def _a (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def _a (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase = os.path.join(__a , "vocab.txt" )
with open(__a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowerCamelCase = BertTokenizer(__a )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
lowerCamelCase = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a , repo_id="test-tokenizer" , push_to_hub=__a , use_auth_token=self._token )
lowerCamelCase = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _a (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase = os.path.join(__a , "vocab.txt" )
with open(__a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowerCamelCase = BertTokenizer(__a )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
lowerCamelCase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__a , repo_id="valid_org/test-tokenizer-org" , push_to_hub=__a , use_auth_token=self._token )
lowerCamelCase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _a (self ):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase = os.path.join(__a , "vocab.txt" )
with open(__a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowerCamelCase = CustomTokenizer(__a )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
lowerCamelCase = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=__a )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase = os.path.join(__a , "vocab.txt" )
with open(__a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowerCamelCase = BertTokenizerFast.from_pretrained(__a )
bert_tokenizer.save_pretrained(__a )
lowerCamelCase = CustomTokenizerFast.from_pretrained(__a )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
lowerCamelCase = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=__a )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
lowerCamelCase = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=__a , trust_remote_code=__a )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def _a (self ):
'''simple docstring'''
lowerCamelCase = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def _a (self ):
'''simple docstring'''
lowerCamelCase = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def _a (self ):
'''simple docstring'''
lowerCamelCase = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def _a (self ):
'''simple docstring'''
lowerCamelCase = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _a (self ):
'''simple docstring'''
lowerCamelCase = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _a (self ):
'''simple docstring'''
lowerCamelCase = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def _a (self ):
'''simple docstring'''
lowerCamelCase = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def _a (self ):
'''simple docstring'''
lowerCamelCase = Trie()
lowerCamelCase = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(__a , ["AB", "C"] )
'''simple docstring'''
def is_power_of_two( number ):
if number < 0:
raise ValueError('''number must not be negative''' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
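# Quick examples for the power-of-two test above: numbers with a single set bit satisfy
# n & (n - 1) == 0, so is_power_of_two(1) and is_power_of_two(16) return True while
# is_power_of_two(6) returns False; by this definition is_power_of_two(0) also returns True.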
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
"""simple docstring"""
y, x = position
positions = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
permissible_positions = []
for inner_position in positions:
y_test, x_test = inner_position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(inner_position )
return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
"""simple docstring"""
if is_complete(board ):
return True
for position in get_valid_pos(pos , len(board ) ):
y, x = position
if board[y][x] == 0:
board[y][x] = curr + 1
if open_knight_tour_helper(board , position , curr + 1 ):
return True
board[y][x] = 0
return False
def open_knight_tour(n: int) -> list[list[int]]:
"""simple docstring"""
board = [[0 for i in range(n )] for j in range(n )]
for i in range(n ):
for j in range(n ):
board[i][j] = 1
if open_knight_tour_helper(board , (i, j) , 1 ):
return board
board[i][j] = 0
msg = F"""Open Knight Tour cannot be performed on a board of size {n}"""
raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
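# Usage note for open_knight_tour above (standard result, stated here as added commentary):
# the smallest square boards admitting an open knight's tour are n = 1 and n >= 5, so
# open_knight_tour(5) returns a filled 5x5 board, while n in {2, 3, 4} exhausts every starting
# square and raises ValueError.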
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
scores = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
scores = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory( args ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class A ( __lowercase ):
@staticmethod
def lowerCAmelCase__ ( _lowerCAmelCase: ArgumentParser ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=_lowerCAmelCase , help="Name of the model to download" )
download_parser.set_defaults(func=download_command_factory )
def __init__( self , model: str , cache: str , force: bool , trust_remote_code: bool ) -> None:
'''simple docstring'''
self._model = model
self._cache = cache
self._force = force
self._trust_remote_code = trust_remote_code
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
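# Usage sketch for the command defined above, assuming the standard `transformers-cli` entry point
# registers it; the flags match the parser configured in this class:
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased
# which instantiates DownloadCommand and pre-downloads the model weights and tokenizer into the cache.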
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__UpperCamelCase : Union[str, Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def a_ ( _A ) -> Any:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
snake_case__ = k.replace(_A , _A )
return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration model and load the converted TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Load every non-optimizer variable from a TF checkpoint into a {name: numpy array} dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    """Convert a TF Pegasus checkpoint into a PyTorch model and tokenizer saved under save_dir."""
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
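# Hedged usage sketch (the script file name below is an assumption): converting an AESLC
# checkpoint from the shell; the checkpoint path follows the default layout used above.
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc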
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output of the semantic Stable Diffusion pipeline: the generated images plus, when the safety
    checker runs, a per-image flag for NSFW content.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Return the Möbius function mu(number): 0 if not square-free, else (-1) ** (number of prime factors)."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
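# Hedged illustration (assumes the imported helpers return prime factors with multiplicity and
# reject repeated factors): the function computes the Möbius function mu(n).
#
#   mobius(30)  # 2 * 3 * 5, three distinct primes -> -1
#   mobius(10)  # 2 * 5, two distinct primes       ->  1
#   mobius(12)  # 2 * 2 * 3, not square-free       ->  0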
'''Image pre-processing utilities (shortest-edge resize, normalization, padding) for the FRCNN visual feature extractor.'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[int, int]): range to sample the target shortest-edge length from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
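# Hedged usage sketch (not part of the original module): mapping detector boxes back to the
# raw image scale with the (scale_y, scale_x) factors returned by Preprocess.__call__, then
# clamping them in place to the image bounds.
#
#   images, sizes, scales_yx = Preprocess(cfg)(raw_images)
#   boxes = _scale_box(pred_boxes, scales_yx)   # x coords scaled by scale_x, y coords by scale_y
#   _clip_box(boxes, (raw_height, raw_width))   # clamp in place to the original image bounds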
'''Convert a PyTorch BertModel checkpoint into the original TensorFlow checkpoint format.'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel PyTorch model instance to be converted
        ckpt_dir: directory in which to save the TensorFlow checkpoint
        model_name: checkpoint name, e.g. "bert-base-uncased"
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
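# Hedged usage sketch (the script file name below is an assumption): exporting a PyTorch BERT
# checkpoint back to the original TensorFlow format; paths and model name are examples only.
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt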
import os
import posixpath
import shutil  # needed by the shard-moving logic in _prepare_split_single
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
    import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark-backed datasets."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # collect all rows belonging to this partition, in the requested order
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
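# Hedged usage sketch (assumption): this builder is what backs `datasets.Dataset.from_spark`;
# the DataFrame below is only illustrative.
#
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = datasets.Dataset.from_spark(df)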
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of `tensor`, preferring static dimensions and falling back to dynamic ones."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # A small epsilon is added to the logits as a workaround for TF/XLA softmax instabilities.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
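# Hedged illustration (not in the original file): flatten() mirrors torch.flatten, collapsing
# the dimensions from start_dim through end_dim into a single axis.
#
#   x = tf.zeros((2, 3, 4, 5))
#   flatten(x, start_dim=1, end_dim=2)  # -> shape (2, 12, 5)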
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expand 1-dimensional `tf.Tensor`s inside a nested structure into 2-dimensional ones."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
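# Hedged illustration (not in the original file): expand_1d only appends a trailing axis to
# rank-1 tensors inside a nested structure and leaves everything else untouched.
#
#   expand_1d({"labels": tf.zeros((8,)), "logits": tf.zeros((8, 2))})
#   # -> {"labels": shape (8, 1), "logits": shape (8, 2)}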