import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    """Constructs a Reformer tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePieceProcessor itself is not picklable
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(index)
        return None

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
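
# A minimal usage sketch (requires the `sentencepiece` package and a trained
# spiece.model file; the path below is a placeholder, and this module must be
# imported through the transformers package because of its relative imports):
#
#     tokenizer = ReformerTokenizer(vocab_file="spiece.model")
#     ids = tokenizer("Crime and Punishment")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))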
from __future__ import annotations
def mean(nums: list) -> float:
    """Find the mean of a list of numbers.

    >>> mean([3, 6, 9])
    6.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
import collections
import os
import re
from pathlib import Path
UpperCamelCase__ = "src/transformers"
# Matches is_xxx_available()
UpperCamelCase__ = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
UpperCamelCase__ = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCamelCase__ = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
UpperCamelCase__ = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
UpperCamelCase__ = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCamelCase__ = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCamelCase__ = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCamelCase__ = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
UpperCamelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
UpperCamelCase__ = re.compile(r"^\s*try:")
# Catches a line with else:
UpperCamelCase__ = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse (per backend) the `_import_structure` objects and the `TYPE_CHECKING` objects defined."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def check_submodules():
    """Check all submodules are properly registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
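
# A usage sketch: in the Transformers repo this consistency check is run from
# the repo root (the exact path of the utility may differ), e.g.
#
#     python utils/check_inits.py
#
# It exits quietly when every __init__.py defines the same objects in its
# _import_structure and TYPE_CHECKING halves, and raises a ValueError otherwise.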
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
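
# With the _LazyModule indirection above, heavyweight submodules are imported
# only on first attribute access. A sketch (assuming this file is
# transformers/models/pix2struct/__init__.py):
#
#     from transformers.models.pix2struct import Pix2StructConfig  # triggers the deferred import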
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the number of set bits in an integer using Brian Kernighan's algorithm.

    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1  # clears the lowest set bit
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the number of set bits in an integer by checking the last bit repeatedly.

    >>> get_set_bits_count_using_modulo_operator(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two bit-counting functions on a few sample values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
def count_divisors(n: int) -> int:
    """Count the divisors of n via prime factorization: if n = p1^a1 * ... * pk^ak,
    then n has (a1 + 1) * ... * (ak + 1) divisors."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number to have more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below n.

    Note: multiples of 15 are already matched by the condition below,
    so they need no separate handling.
    """
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F"{solution() = }")
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    # Downsampling block with resnets interleaved with cross-attention transformers.
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    # Plain 2D downsampling block (resnets only, no attention).
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    # Upsampling block with resnets interleaved with cross-attention transformers.
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    # Plain 2D upsampling block (resnets only, no attention).
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    # Mid block: a resnet, then alternating (attention, resnet) pairs.
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
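
# A minimal shape-check sketch (assumptions for illustration: NHWC tensor
# layout, and a 128-wide time embedding whose width is inferred by the
# resnet's Dense layer; run from a context where this module is importable):
#
#     import jax
#     block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#     sample = jnp.zeros((1, 16, 16, 32))   # (batch, height, width, channels)
#     temb = jnp.zeros((1, 128))
#     params = block.init(jax.random.PRNGKey(0), sample, temb)
#     hidden, skip_states = block.apply(params, sample, temb)  # hidden halves H and W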
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
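
# A minimal usage sketch (the hyper-parameter values are illustrative, not from
# the original file; import through the transformers package because of the
# relative imports above):
#
#     config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
#     print(config.d_model, config.feature_size)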
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
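
# A usage sketch (the script filename is a placeholder): Accelerate examples
# are normally started through the CLI so the distributed environment is set
# up, e.g.
#
#     accelerate launch this_script.py --mixed_precision fp16
#
# or, for a quick single-process run, `python this_script.py --cpu`.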
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Zeller's congruence: return the day of the week for a date given as an
    mm-dd-yyyy (or mm/dd/yyyy) string."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
    args = parser.parse_args()
zeller(args.date_input)
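
# A usage sketch (assuming this file is saved as zeller.py):
#
#     python zeller.py 01-31-2010
#
# zeller() returns a string such as "Your date 01-31-2010, is a Sunday!";
# wrap the call above in print() to display it.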
def permute_recursive(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums by rotating the first element out.

    >>> sorted(permute_recursive([1, 2, 3]))
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute_recursive(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute_backtrack(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums using in-place swaps and backtracking.

    >>> sorted(permute_backtrack([1, 2, 3]))
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    """

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute_backtrack function
    res = permute_backtrack([1, 2, 3])
    print(res)
    doctest.testmod()
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Creates a Linked List from the elements of the given sequence and
    returns the head of the Linked List.

    >>> make_linked_list([14, 52, 14, 12, 43])
    14->52->14->12->43
    """
    if not elements_list:
        raise Exception("The Elements List is empty")

    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
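
# A minimal usage sketch (the checkpoint name is a real hosted ESM-2 model, but
# loading it requires network access; import through the transformers package):
#
#     tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     enc = tokenizer("MKTAYIAKQR")  # the trie splits the sequence per residue
#     print(enc["input_ids"])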
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    # Shard filenames encode their sample count as "-<shard>-<count>.tfrecord".
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
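
# A usage sketch (paths and names are placeholders): on a TPU VM this script is
# typically run as
#
#     python run_mlm.py --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#         --tokenizer unigram-tokenizer-wikitext --output_dir ./mlm_checkpoints --bfloat16
#
# where the flags correspond to the argparse options defined in parse_args() above.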
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of the pinned file on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 684 | 1 |
def solution() -> int:
    """Project Euler problem 40: product of the digits d1, d10, d100, ...,
    d1000000 of Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
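# Worked check, for illustration: the digit sequence is "123456789101112...",
# so d1=1, d10=1, d100=5, d1000=3, d10000=7, d100000=2 and d1000000=1,
# giving 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.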
| 5 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 37 | 0 |
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined, sorted contents of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 617 |
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device

from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 617 | 1 |
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 50 |
from __future__ import annotations

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Strand sort: repeatedly pull an ordered sublist out of ``arr`` and merge
    it into ``solution``."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 50 | 1 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( _snake_case : float , _snake_case : float , _snake_case : float , ) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
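# Illustrative checks (assumed examples; the unknown quantity is passed as 0):
assert shear_stress(stress=25, tangential_force=100, area=0) == ("area", 4.0)
assert shear_stress(stress=0, tangential_force=1600, area=200) == ("stress", 8.0)
assert shear_stress(stress=1000, tangential_force=0, area=1200) == ("tangential_force", 1_200_000.0)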
| 505 |
"""simple docstring"""
a = 256
# Modulus to hash a string
a = 1_000_003
def _snake_case ( _snake_case : str , _snake_case : str ) -> bool:
'''simple docstring'''
_A = len(_snake_case )
_A = len(_snake_case )
if p_len > t_len:
return False
_A = 0
_A = 0
_A = 1
# Calculating the hash of pattern and substring of text
for i in range(_snake_case ):
_A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_A = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_A = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_A = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _snake_case ( ) -> None:
'''simple docstring'''
_A = 'abc1abc12'
_A = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
_A = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case )
# Test 2)
_A = 'ABABX'
_A = 'ABABZABABYABABX'
assert rabin_karp(_snake_case , _snake_case )
# Test 3)
_A = 'AAAB'
_A = 'ABAAAAAB'
assert rabin_karp(_snake_case , _snake_case )
# Test 4)
_A = 'abcdabcy'
_A = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(_snake_case , _snake_case )
# Test 5)
_A = 'Lü'
_A = 'Lüsai'
assert rabin_karp(_snake_case , _snake_case )
_A = 'Lue'
assert not rabin_karp(_snake_case , _snake_case )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
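# Rolling-hash sketch (worked example using the base/modulus defined above):
# with base b = alphabet_size and window length m = p_len, sliding the window
# one character to the right updates the hash in O(1):
#     H_new = ((H_old - ord(t[i]) * b**(m-1)) * b + ord(t[i + m])) % modulus
# e.g. for text "abc" and m = 2: H("ab") = (97*256 + 98) % modulus, and
# H("bc") = ((H("ab") - 97*256) * 256 + 99) % modulus = (98*256 + 99) % modulus.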
| 505 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 655 |
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from ``elements_list`` and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set the first element as the head, then append the rest
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Recursively print the elements of the linked list in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()

    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 655 | 1 |
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the length (in vertices) of the longest path in a DAG, using
    Kahn's algorithm for topological sorting."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
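# Worked example for the adjacency list above: the longest chain is
# 0 -> 2 -> 5 -> 6 -> 7, i.e. 5 vertices, so this call prints 5.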
| 673 |
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 673 | 1 |
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 72 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=__lowercase ):
UpperCAmelCase__ = ['''note_seq''']
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def _lowercase (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def _lowercase (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] ) | 626 | 0 |
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn

from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class ProcessorGradientFlow:
    """
    Wraps the CLIP processor so that image preprocessing stays in torch and
    gradients can flow through it (the stock processor converts to PIL images,
    which breaks gradient flow).
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        """Instantiate a VQGAN_CLIP model. To use a custom VQGAN model, pass it as `vqgan`."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        frame_durations = [frame_duration] * len(paths)
        if extend_frames:
            frame_durations[0] = 1.5
            frame_durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=frame_durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector transform to the base latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({"Original Image": wandb.Image(image)})

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
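# ---------------------------------------------------------------------------
# Illustrative usage (a sketch; the paths and prompts below are assumptions):
#
# vqgan_clip = VQGAN_CLIP(iterations=20, lr=0.05)
# vqgan_clip.generate(
#     pos_prompts="a smiling face:1.0 | bright lighting:0.5",
#     neg_prompts="blurry",
#     image_path="./my_face.png",
#     show_intermediate=False,
#     save_intermediate=True,
# )
# vqgan_clip.make_animation(output_path="./edit.gif")
#
# Prompts may be "|"-separated strings with optional ":weight" suffixes, which
# process_prompts above turns into a dict of prompts and weight tensors.
# ---------------------------------------------------------------------------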
| 703 |
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape an Amazon search results page for basic product data."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 597 | 0 |
def split(string: str, separator: str = " ") -> list:
    """Split ``string`` on ``separator`` without using str.split."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
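# Illustrative checks (assumed examples, verified against the implementation above).
# Note that a trailing separator yields no trailing empty string, unlike str.split:
assert split("apple#banana#cherry#orange", separator="#") == ["apple", "banana", "cherry", "orange"]
assert split("Hello there") == ["Hello", "there"]
assert split("a,b,", separator=",") == ["a", "b"]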
| 471 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
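# Illustrative usage (a sketch; this module is normally imported from the
# library rather than run directly, so the example is left as comments):
#
# config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
# assert config.hidden_size == 768 and config.num_frames == 16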
| 471 | 1 |
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
from __future__ import annotations
def lowercase_ ( _A : str , _A : list[str] | None = None , _A : dict[str, float] | None = None , _A : bool = False , ):
"""simple docstring"""
lowerCamelCase__ : Tuple = cipher_alphabet or [chr(_A ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCamelCase__ : Dict = {
"a": 0.08_497,
"b": 0.01_492,
"c": 0.02_202,
"d": 0.04_253,
"e": 0.11_162,
"f": 0.02_228,
"g": 0.02_015,
"h": 0.06_094,
"i": 0.07_546,
"j": 0.00_153,
"k": 0.01_292,
"l": 0.04_025,
"m": 0.02_406,
"n": 0.06_749,
"o": 0.07_507,
"p": 0.01_929,
"q": 0.00_095,
"r": 0.07_587,
"s": 0.06_327,
"t": 0.09_356,
"u": 0.02_758,
"v": 0.00_978,
"w": 0.02_560,
"x": 0.00_150,
"y": 0.01_994,
"z": 0.00_077,
}
else:
# Custom frequencies dictionary
lowerCamelCase__ : Optional[int] = frequencies_dict
if not case_sensitive:
lowerCamelCase__ : str = ciphertext.lower()
# Chi squared statistic values
lowerCamelCase__ : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(_A ) ):
lowerCamelCase__ : Optional[Any] = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
lowerCamelCase__ : Dict = (alphabet_letters.index(letter.lower() ) - shift) % len(
_A )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCamelCase__ : str = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
lowerCamelCase__ : List[str] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase__ : List[str] = decrypted_with_shift.lower().count(_A )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCamelCase__ : List[Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase__ : str = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase__ : Any = decrypted_with_shift.count(_A )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCamelCase__ : str = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase__ : int = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
lowerCamelCase__ : Optional[int] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(_A : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
lowerCamelCase__ : int = min(
_A , key=_A , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : int = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
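# Illustrative call (assumed example; a Caesar shift of 7 maps "why" to "dof"):
#
# decrypt_caesar_with_chi_squared(
#     "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
# )
# # -> (7, <chi-squared value>, "why is the caesar cipher so popular? it is too easy to crack!")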
| 5 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
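    # Sanity check: for n = 10, (1 + ... + 10) ** 2 = 3025 and
    # 1**2 + ... + 10**2 = 385, so the difference is 2640.
    assert solution(10) == 2640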
| 180 |
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
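
# A couple of sanity checks (illustrative values; the list must be sorted in
# ascending order for the bisection to be valid):
assert binary_search([1, 3, 5, 7], 5) is True
assert binary_search([1, 3, 5, 7], 4) is False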
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(F'{target} was {not_str}found in {sequence}')
| 180 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any]="speaker_embeddings_path.json" , **__UpperCamelCase : Union[str, Any] ):
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase = get_file_from_repo(
__UpperCamelCase , __UpperCamelCase , subfolder=kwargs.pop("subfolder" , __UpperCamelCase ) , cache_dir=kwargs.pop("cache_dir" , __UpperCamelCase ) , force_download=kwargs.pop("force_download" , __UpperCamelCase ) , proxies=kwargs.pop("proxies" , __UpperCamelCase ) , resume_download=kwargs.pop("resume_download" , __UpperCamelCase ) , local_files_only=kwargs.pop("local_files_only" , __UpperCamelCase ) , use_auth_token=kwargs.pop("use_auth_token" , __UpperCamelCase ) , revision=kwargs.pop("revision" , __UpperCamelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
                    F'''`{os.path.join(__UpperCamelCase , __UpperCamelCase )}` does not exist,
                    no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
_UpperCAmelCase = None
else:
with open(__UpperCamelCase ) as speaker_embeddings_json:
_UpperCAmelCase = json.load(__UpperCamelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
return cls(tokenizer=__UpperCamelCase , speaker_embeddings=__UpperCamelCase )
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Any="speaker_embeddings_path.json" , __UpperCamelCase : List[str]="speaker_embeddings" , __UpperCamelCase : bool = False , **__UpperCamelCase : Dict , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__UpperCamelCase , __UpperCamelCase , "v2" ) , exist_ok=__UpperCamelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase = self._load_voice_preset(__UpperCamelCase )
_UpperCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , __UpperCamelCase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=__UpperCamelCase , )
_UpperCAmelCase = os.path.join(__UpperCamelCase , F'''{prompt_key}_{key}.npy''' )
_UpperCAmelCase = tmp_dict
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , "w" ) as fp:
json.dump(__UpperCamelCase , __UpperCamelCase )
super().save_pretrained(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : str = None , **__UpperCamelCase : int ):
_UpperCAmelCase = self.speaker_embeddings[voice_preset]
_UpperCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
_UpperCAmelCase = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , __UpperCamelCase ) , cache_dir=kwargs.pop("cache_dir" , __UpperCamelCase ) , force_download=kwargs.pop("force_download" , __UpperCamelCase ) , proxies=kwargs.pop("proxies" , __UpperCamelCase ) , resume_download=kwargs.pop("resume_download" , __UpperCamelCase ) , local_files_only=kwargs.pop("local_files_only" , __UpperCamelCase ) , use_auth_token=kwargs.pop("use_auth_token" , __UpperCamelCase ) , revision=kwargs.pop("revision" , __UpperCamelCase ) , )
if path is None:
raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist,
                    no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
_UpperCAmelCase = np.load(__UpperCamelCase )
return voice_preset_dict
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[dict] = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self : Any , __UpperCamelCase : Dict=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any="pt" , __UpperCamelCase : int=256 , __UpperCamelCase : Dict=False , __UpperCamelCase : Dict=True , __UpperCamelCase : Tuple=False , **__UpperCamelCase : Any , ):
if voice_preset is not None and not isinstance(__UpperCamelCase , __UpperCamelCase ):
if (
isinstance(__UpperCamelCase , __UpperCamelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase = self._load_voice_preset(__UpperCamelCase )
else:
if isinstance(__UpperCamelCase , __UpperCamelCase ) and not voice_preset.endswith(".npz" ):
_UpperCAmelCase = voice_preset + ".npz"
_UpperCAmelCase = np.load(__UpperCamelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
_UpperCAmelCase = self.tokenizer(
__UpperCamelCase , return_tensors=__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
if voice_preset is not None:
_UpperCAmelCase = voice_preset
return encoded_text
| 703 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
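# Design note: with this lazy-import pattern, importing the package stays cheap
# because the torch-backed classes are only resolved on first attribute access
# through _LazyModule; the TYPE_CHECKING branch above exists so static type
# checkers and IDEs still see the real symbols.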
| 129 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase :List[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
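    # Example invocation (script filename and output path are illustrative):
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224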
| 506 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name) -> None:
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = F'''{src_lang}-{tgt_lang}'''
    readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(F'''Generating {path}''')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 372 | 0 |
"""simple docstring"""
def rank_of_matrix(matrix: list[list[float]]) -> int:
    '''simple docstring'''
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row (note:
            # rebinding the loop variable has no effect on Python's `for`
            # loop; this line mirrors the original algorithm's intent)
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
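    # Illustrative checks: the second row of [[1, 2], [2, 4]] is a multiple of
    # the first, so the rank drops to 1, while the 2x2 identity keeps full rank.
    assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1
    assert rank_of_matrix([[1.0, 0.0], [0.0, 1.0]]) == 2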
| 701 |
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    '''simple docstring'''
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        'Expected a list of numbers as input, found '
                        f'{type(item).__name__}'
                    )
                    raise TypeError(msg)
        else:
            msg = f'Expected a list of numbers as input, found {type(point).__name__}'
            raise TypeError(msg)
    else:
        raise ValueError('Missing an input')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
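    # Illustrative checks: |1-2| + |1-2| = 2 and |1-5| + |4-5| = 5.
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1, 4], [5, 5]) == 5.0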
| 505 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 22 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_guided_video_to_video(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ,expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" ,torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : int = torch.randn((1, 10, 3, 10_24, 5_76) ,generator=_snake_case )
UpperCAmelCase_ : List[Any] = video.to("cuda" )
UpperCAmelCase_ : List[Any] = "Spiderman is surfing"
UpperCAmelCase_ : Optional[Any] = pipe(_snake_case ,video=_snake_case ,generator=_snake_case ,num_inference_steps=3 ,output_type="pt" ).frames
UpperCAmelCase_ : Any = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 71 | 0 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
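    # Classic illustrative instance: with values (60, 100, 120), weights
    # (10, 20, 30) and capacity 50, the greedy choice takes items 0 and 1 whole
    # plus two thirds of item 2 for a total value of 240.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 2 / 3]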
| 554 |
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """simple docstring"""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        """simple docstring"""
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """simple docstring"""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """simple docstring"""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        """simple docstring"""
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """simple docstring"""
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        # compute contextualized embeddings for query and support tokens
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
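# Sketch of the intended call pattern (shapes are assumptions, not taken from
# the original file): W_query and W_supports are batched tokenizer outputs, and
# the returned p_starts / p_ends stack per-token probabilities that each query
# token starts or ends an entity span.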
| 554 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    '''simple docstring'''

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
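# Illustrative usage (assumes a live SparkSession bound to `spark`):
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()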
| 33 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop('''images''', None)
        text = kwargs.pop('''text''', None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''')
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@contextmanager
    def as_target_processor(self):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r'''<s_(.*?)>''', tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rF"""</s_{key}>""", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, '''''')
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r'''<sep/>'''):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
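# Illustrative round trip for the `tokenajson` helper above: a generated
# sequence such as "<s_menu><s_name>Latte</s_name></s_menu>" parses to the
# nested dict {"menu": {"name": "Latte"}}, while plain text with no <s_...>
# tags falls back to {"text_sequence": ...}.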
| 33 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = 1
snake_case__ = 3
snake_case__ = (3_2, 3_2)
snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(__snake_case)
return image
@property
def __magic_name__ ( self : Tuple):
'''simple docstring'''
torch.manual_seed(0)
snake_case__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
return model
@property
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
torch.manual_seed(0)
snake_case__ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __magic_name__ ( self : str):
'''simple docstring'''
torch.manual_seed(0)
snake_case__ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(__snake_case)
@property
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
def extract(*UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[Any]):
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str]):
'''simple docstring'''
snake_case__ = torch.ones([0])
def __magic_name__ ( self : Tuple , UpperCamelCase__ : Any):
'''simple docstring'''
self.pixel_values.to(__snake_case)
return self
return Out()
return extract
def __magic_name__ ( self : str):
'''simple docstring'''
snake_case__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case__ = self.dummy_cond_unet
snake_case__ = PNDMScheduler(skip_prk_steps=__snake_case)
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""")
snake_case__ = 7_7
snake_case__ = self.dummy_image.to(__snake_case)
snake_case__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case__ = AltDiffusionImgaImgPipeline(
unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , )
snake_case__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__snake_case)
snake_case__ = alt_pipe.to(__snake_case)
alt_pipe.set_progress_bar_config(disable=__snake_case)
snake_case__ = '''A painting of a squirrel eating a burger'''
snake_case__ = torch.Generator(device=__snake_case).manual_seed(0)
snake_case__ = alt_pipe(
[prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__snake_case , )
snake_case__ = output.images
snake_case__ = torch.Generator(device=__snake_case).manual_seed(0)
snake_case__ = alt_pipe(
[prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__snake_case , return_dict=__snake_case , )[0]
snake_case__ = image[0, -3:, -3:, -1]
snake_case__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""")
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
snake_case__ = self.dummy_cond_unet
snake_case__ = PNDMScheduler(skip_prk_steps=__snake_case)
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""")
snake_case__ = 7_7
snake_case__ = self.dummy_image.to(__snake_case)
# put models in fp16
snake_case__ = unet.half()
snake_case__ = vae.half()
snake_case__ = bert.half()
# make sure here that pndm scheduler skips prk
snake_case__ = AltDiffusionImgaImgPipeline(
unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , )
snake_case__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__snake_case)
snake_case__ = alt_pipe.to(__snake_case)
alt_pipe.set_progress_bar_config(disable=__snake_case)
snake_case__ = '''A painting of a squirrel eating a burger'''
snake_case__ = torch.manual_seed(0)
snake_case__ = alt_pipe(
[prompt] , generator=__snake_case , num_inference_steps=2 , output_type="""np""" , image=__snake_case , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""")
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
snake_case__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""")
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ = init_image.resize((7_6_0, 5_0_4))
snake_case__ = '''BAAI/AltDiffusion'''
snake_case__ = AltDiffusionImgaImgPipeline.from_pretrained(
__snake_case , safety_checker=__snake_case , )
pipe.to(__snake_case)
pipe.set_progress_bar_config(disable=__snake_case)
pipe.enable_attention_slicing()
snake_case__ = '''A fantasy landscape, trending on artstation'''
snake_case__ = torch.manual_seed(0)
snake_case__ = pipe(
prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , generator=__snake_case , output_type="""np""" , )
snake_case__ = output.images[0]
snake_case__ = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
snake_case__ = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int):
'''simple docstring'''
snake_case__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""")
snake_case__ = init_image.resize((7_6_8, 5_1_2))
snake_case__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""")
snake_case__ = '''BAAI/AltDiffusion'''
snake_case__ = AltDiffusionImgaImgPipeline.from_pretrained(
__snake_case , safety_checker=__snake_case , )
pipe.to(__snake_case)
pipe.set_progress_bar_config(disable=__snake_case)
pipe.enable_attention_slicing()
snake_case__ = '''A fantasy landscape, trending on artstation'''
snake_case__ = torch.manual_seed(0)
snake_case__ = pipe(
prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , generator=__snake_case , output_type="""np""" , )
snake_case__ = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1E-2
| 705 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    locka = FileLock(str(tmpdir / """foo.lock"""))
    lockb = FileLock(str(tmpdir / """foo.lock"""))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = """a""" * 1000 + """.lock"""
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(""".lock""")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
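# Both functions rely on pytest's built-in `tmpdir` fixture, so they are meant
# to be collected by pytest rather than executed directly, e.g. (path is
# illustrative):
#   pytest tests/test_filelock.py -q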
| 99 | 0 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
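    # Both calls above evaluate the same polynomial (mathematically
    # 5 * 10**2 + 9.3 * 10**3 + 7 * 10**4 = 79800), so the two results agree
    # up to floating-point rounding.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6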
| 401 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
import doctest
doctest.testmod()
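    # Illustrative check: for [1, 2, 3] the best non-adjacent picks are 1 and 3.
    assert maximum_non_adjacent_sum([1, 2, 3]) == 4
    assert maximum_non_adjacent_sum([]) == 0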
| 401 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        '''simple docstring'''
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        '''simple docstring'''
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def _a ( self ):
'''simple docstring'''
__A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__A : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__A : str = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
__A : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
__A : Any = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
# Test batched
__A : int = feat_extract(__lowerCamelCase , return_tensors='np' ).input_values
__A : Optional[Any] = feat_extract(__lowerCamelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def _a ( self ):
'''simple docstring'''
__A : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__A : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__A : List[str] = ['longest', 'max_length', 'do_not_pad']
__A : List[str] = [None, 1600, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase ):
__A : int = feat_extract(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors='np' )
__A : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _a ( self ):
'''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lengths = range(800 , 1400 , 200 )
        speech_inputs = [floats_list((1, x) )[0] for x in lengths]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , max_length=max_length , padding=padding )
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _a ( self ):
'''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding='max_length' , return_tensors='np' )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
def _a ( self ):
'''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding='longest' , return_tensors='np' )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )

        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=2000 , padding='longest' , return_tensors='np' )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )
def _a ( self ):
'''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def _a ( self ):
'''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs , padding=True , return_tensors='np' ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        encoded_sequences_a = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
        encoded_sequences_b = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
        self.assertTrue(np.allclose(encoded_sequences_a , encoded_sequences_b , atol=1e-3 ) )
        # Test batched
        encoded_sequences_a = feature_extractor(speech_inputs , return_tensors='np' ).input_values
        encoded_sequences_b = feature_extractor(np_speech_inputs , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_a = feature_extractor(speech_inputs , return_tensors='np' ).input_values
        encoded_sequences_b = feature_extractor(np_speech_inputs , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1e-3 ) )
def _a ( self ):
'''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _a ( self ):
'''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _a ( self ):
'''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        input_np = feat_extract.pad(processed_features , padding='longest' , return_tensors='np' )[input_name]
        input_pt = feat_extract.pad(processed_features , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
def _a ( self ):
'''simple docstring'''
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed = feat_extract.pad(processed , padding='longest' , return_tensors='np' )
        self.assertIn('attention_mask' , processed )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , input_lengths )
def _a ( self ):
'''simple docstring'''
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed_pad = feat_extract.pad(
            processed , padding='max_length' , max_length=max_length , truncation=True , return_tensors='np' )
        self.assertIn('attention_mask' , processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ):
        '''simple docstring'''
        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
def _a ( self ):
'''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors='pt' ).input_values
        self.assertEquals(input_values.shape , (1, 93680) )
        self.assertTrue(torch.allclose(input_values[0, :30] , EXPECTED_INPUT_VALUES , atol=1e-6 ) )
def _a ( self ):
'''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
             -3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
             -3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
             -3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech , return_tensors='pt' ).input_values
        self.assertEquals(input_values.shape , (1, 366, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1e-4 ) )
| 237 | """simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
SPIECE_UNDERLINE = '''▁'''


class __snake_case( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def _a ( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
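        # Added illustration: a sequence pair is laid out RoBERTa-style as
        # `<s> A </s></s> B </s>`, so [5, 6] and [7] become [cls, 5, 6, sep, sep, 7, sep].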
    def _a ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def _a ( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    @property
    def vocab_size( self ):
'''simple docstring'''
return len(self.sp_model )
def _a ( self ):
'''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def _a ( self , text ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )

    def _a ( self , token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id

    def _a ( self , index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def _a ( self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__A : str = {}
__A : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _a ( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
| 237 | 1 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()

results = {}
# fmt: off
a__ : Dict = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
a__ : Dict = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
a__ : str = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
a__ : Dict = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
a__ : Any = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
a__ : Optional[int] = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
a__ : Optional[int] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
a__ : str = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
a__ : Union[str, Any] = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
a__ : int = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
a__ : List[str] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
a__ : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
a__ : Any = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
a__ : List[Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
a__ : List[Any] = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter='diffusers')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]

        print(F"""Started running {mod.modelId}!!!""")

        if mod.modelId.startswith('CompVis'):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 51 |
import math
import random
def sigmoid_function( value : float , deriv : bool = False ) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
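
# Added note: with deriv=True the helper returns s * (1 - s) and therefore expects an
# already-activated value, e.g. sigmoid_function(sigmoid_function(0.0), deriv=True) == 0.25.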
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation( expected : int , number_propagations : int ) -> float:
    # Random weight
    weight = float(2 * (random.randint(1 , 100 )) - 1 )

    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , deriv=True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 40 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a ( ABC ):
@staticmethod
@abstractmethod
def snake_case_ ( _lowerCAmelCase ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def snake_case_ ( self ):
"""simple docstring"""
raise NotImplementedError()
| 702 |
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase ( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
    """simple docstring"""
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
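

# Added usage sketch (not in the original module): the function solves the impedance
# triangle Z**2 = R**2 + X**2 for whichever quantity is passed as 0.
def _demo_ohms_law() -> None:
    print(lowerCAmelCase(3 , 4 , 0 ))  # {'impedance': 5.0}
    print(lowerCAmelCase(0 , 4 , 5 ))  # {'resistance': 3.0}
    print(lowerCAmelCase(3 , 0 , 5 ))  # {'reactance': 4.0}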
if __name__ == "__main__":
import doctest
doctest.testmod()
| 146 | 0 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class _snake_case ( unittest.TestCase ):
    def one_complete_example( self , complete_file_name , parser_only , secondary_filename = None , special_strings = None ):
        '''simple docstring'''
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
        examples_path = os.path.abspath('examples' )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section='main()' if parser_only else 'training_function()' , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , complete_file_name ) , item_path , secondary_filename , special_strings )
                        diff = '\n'.join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , '' )
                        self.assertEqual(diff , '' )

    def test_nlp_examples( self ):
        '''simple docstring'''
        self.one_complete_example('complete_nlp_example.py' , True )
        self.one_complete_example('complete_nlp_example.py' , False )
    def test_cv_examples( self ):
        '''simple docstring'''
        cv_path = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
        special_strings = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py' , True , cv_path , special_strings )
        self.one_complete_example('complete_cv_example.py' , False , cv_path , special_strings )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class _snake_case ( TempDirTestCase ):
    clean_on_separate_pass = False
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , 'default_config.yml' )

        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
    @classmethod
    def tearDownClass( cls ):
        '''simple docstring'''
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )
    def test_checkpointing_by_epoch( self ):
        '''simple docstring'''
        testargs = F'\n    examples/by_feature/checkpointing.py\n    --checkpointing_steps epoch\n    --output_dir {self.tmpdir}\n    '.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )

    def test_checkpointing_by_steps( self ):
        '''simple docstring'''
        testargs = F'\n    examples/by_feature/checkpointing.py\n    --checkpointing_steps 1\n    --output_dir {self.tmpdir}\n    '.split()
        _ = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )

    def test_load_states_by_epoch( self ):
        '''simple docstring'''
        testargs = F'\n    examples/by_feature/checkpointing.py\n    --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n    '.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        self.assertNotIn('epoch 0:' , output )
        self.assertIn('epoch 1:' , output )
    def test_load_states_by_steps( self ):
        '''simple docstring'''
        testargs = F'\n    examples/by_feature/checkpointing.py\n    --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n    '.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:' , output )
            self.assertIn('epoch 1:' , output )
        else:
            self.assertIn('epoch 0:' , output )
            self.assertIn('epoch 1:' , output )
    @slow
    def test_cross_validation( self ):
        '''simple docstring'''
        testargs = '\n    examples/by_feature/cross_validation.py\n    --num_folds 2\n    '.split()
        with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
            output = run_command(self._launch_args + testargs , return_stdout=True )
            results = re.findall('({.+})' , output )
            results = [r for r in results if 'accuracy' in r][-1]
            results = ast.literal_eval(results )
            self.assertGreaterEqual(results['accuracy'] , 0.75 )
    def test_multi_process_metrics( self ):
        '''simple docstring'''
        testargs = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_tracking( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F'\n    examples/by_feature/tracking.py\n    --with_tracking\n    --project_dir {tmpdir}\n    '.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , 'tracking' ) ) )
    def test_gradient_accumulation( self ):
        '''simple docstring'''
        testargs = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs )
    def test_local_sgd( self ):
        '''simple docstring'''
        testargs = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs )
| 284 |
'''simple docstring'''
def snake_case ( weights : list , values : list , number_of_items : int , max_weight : int , index : int ) -> int:
    """simple docstring"""
    if index == number_of_items:
        return 0
    ans_without = 0
    ans_with = 0
    ans_without = snake_case(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans_with = values[index] + snake_case(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans_without , ans_with )
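

# Added illustration (hypothetical toy data, not from the original file): each call
# compares skipping the current item against taking it when it still fits.
def _demo_knapsack() -> None:
    # items: weights [1, 2, 3] with values [6, 10, 12], capacity 5 -> best value 22
    print(snake_case([1, 2, 3] , [6, 10, 12] , 3 , 5 , 0 ))  # expected: 22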
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 | 1 |
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = "tapas"

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
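

# Added usage sketch (illustration only; the class name follows this file's aliasing):
#   config = UpperCAmelCase__(num_aggregation_labels=4 , use_answer_as_supervision=True )
#   config.save_pretrained('./tapas-wtq-config' )  # standard PretrainedConfig round-trip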
| 720 |
import math
class SelfOrganizingMap :
    """simple docstring"""
    def get_winner( self , weights : list[list[float]] , sample : list[int] ) -> int:
        # Compute the winning vector by Euclidean distance
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1

    def update( self , weights : list[list[int | float]] , sample : list[int] , j : int , alpha : float ) -> list[list[int | float]]:
        for i in range(len(weights ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    '''simple docstring'''
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F'''Clusters that the test sample belongs to : {winner}''' )
    print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
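
# Added note: the update rule w_j <- w_j + alpha * (sample - w_j) pulls the winning
# prototype toward the sample; with alpha = 0.5 each update halves that distance.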
| 472 | 0 |
def lowercase( a : int , b : int ) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )

    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == """1""" and char_b == """1""" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
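

# Added examples: bitwise AND rendered as a zero-padded binary string.
# >>> lowercase(25 , 32 )
# '0b000000'
# >>> lowercase(37 , 50 )
# '0b100000'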
if __name__ == "__main__":
import doctest
doctest.testmod()
| 537 | import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def _get_tensors( self , length ):
        """simple docstring"""
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores

    def test_list_criteria( self ):
        """simple docstring"""
        input_ids, scores = self._get_tensors(5 )

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )

        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )

    def test_max_length_criteria( self ):
        """simple docstring"""
        criteria = MaxLengthCriteria(max_length=10 )

        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )

    def test_max_new_tokens_criteria( self ):
        """simple docstring"""
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )

        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )

        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )

    def test_max_time_criteria( self ):
        """simple docstring"""
        input_ids, scores = self._get_tensors(5 )

        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )

        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )

    def test_validate_stopping_criteria( self ):
        """simple docstring"""
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )

        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
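
# Added note: a generation loop typically polls the criteria once per decoding step,
# e.g. `while not criteria(input_ids, scores): input_ids = take_step(input_ids)`,
# where `take_step` is a hypothetical placeholder for the model's next-token step.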
| 537 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict , encoder_only=False ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("""head""" ):
            key = """segformer.encoder.""" + key
        if key.startswith("""backbone""" ):
            key = key.replace("""backbone""" , """segformer.encoder""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
            key = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
        if key.startswith("""head""" ):
            key = key.replace("""head""" , """classifier""" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict , config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[
                config.hidden_sizes[i] :
            ]
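
# Added note: the original checkpoints store K and V fused as one `kv` matrix of shape
# (2 * hidden_size, hidden_size); the first half becomes `key.*`, the second `value.*`.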
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_segformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path ):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = """huggingface/label-files"""
    if "segformer" in model_name:
        size = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = """ade20k-id2label.json"""
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = """cityscapes-id2label.json"""
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(F'''Model {model_name} not supported''' )
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = """imagenet-1k-id2label.json"""
        expected_shape = (1, 1000)
    else:
        raise ValueError(F'''Model {model_name} not supported''' )

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(F'''Size {size} not supported''' )

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values

    logger.info(F'''Converting model {model_name}...''' )

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    else:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )["""state_dict"""]

    # rename keys
    state_dict = rename_keys(state_dict , encoder_only=encoder_only )
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config )
    else:
        model = SegformerForSemanticSegmentation(config )
    model.load_state_dict(state_dict )
    model.eval()

    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.id2label[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a__ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 99 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(1_0)}
def digit_factorial_sum( number : int ) -> int:
    if not isinstance(number , int ):
        raise TypeError("""Parameter number must be int""" )

    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""" )

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )


def solution( chain_length : int = 60 , number_limit : int = 100_0000 ) -> int:
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError("""Parameters chain_length and number_limit must be int""" )

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""" )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
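
# Added note: 145 is a fixed point of the digit-factorial map (1! + 4! + 5! = 145),
# so every chain that reaches it stops growing; `chain_sets_lengths` caches such tails.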
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
| 99 | 1 |
def remove_duplicates( key : str ) -> str:
    """simple docstring"""
    key_no_dups = ''
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map( key : str ) -> dict[str, str]:
    """simple docstring"""
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher( message : str , cipher_map : dict[str, str] ) -> str:
    """simple docstring"""
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )


def decipher( message : str , cipher_map : dict[str, str] ) -> str:
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )


def main() -> None:
    """simple docstring"""
    message = input('''Enter message to encode or decode: ''' ).strip()
    key = input('''Enter keyword: ''' ).strip()
    option = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
    try:
        func = {'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
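# Illustrative round trip (the keyword is a placeholder):
#   cm = create_cipher_map("SECRET")
#   assert decipher(encipher("HELLO WORLD", cm), cm) == "HELLO WORLD"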
| 60 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
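# Quick check: prime_factors(100) == [2, 2, 5, 5] and prime_factors(97) == [97].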
if __name__ == "__main__":
import doctest
doctest.testmod() | 623 | 0 |
"""Odd-even transposition sort (brick sort): a parallel-friendly variant of bubble sort."""


def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
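# Quick check: odd_even_transposition([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5].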
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 347 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        # We test on the dev set to compare to benchmarks without submitting to the GLUE server
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--task", default="", type=str, required=True, help="The GLUE task to run", )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none", )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results", f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}", )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 347 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 328 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
)
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 328 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 709 |
"""simple docstring"""
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
while b:
A , A = b, a % b
return a
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(UpperCamelCase__ , a % b )
def __snake_case ( ) -> List[Any]:
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
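# Quick check: euclidean_gcd(252, 105) == 21, since 252 = 2^2 * 3^2 * 7 and 105 = 3 * 5 * 7.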
if __name__ == "__main__":
main()
| 91 | 0 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
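# Quick check: the 4-cycle 0-1-2-3-0 below (plus the isolated vertex 4) is
# bipartite, so check_bipartite_dfs(graph) prints True.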
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 190 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
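# Quick check: is_pangram("Waltz, bad nymph, for quick jigs vex") is True,
# while is_pangram("Hello world") is False.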
def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 190 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 715 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50257, n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False, ):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal.")
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(self, input_ids=None, input_embeds=None, device=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None, ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 100 | 0 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    """Project Euler 36: sum the numbers below n that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
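# Quick check: 585 is palindromic in base 10 and in base 2 (1001001001).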
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 250 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 472 | 0 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ : List[str] = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowerCamelCase__ : Optional[Any] = """allenai"""
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs)
    args = vars(chkpt["args"]["model"])
    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]
    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"
    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 12 |
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
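# Quick check: with the default coefficients (a = b = [1.0, 0.0, ...]) the
# filter is a pass-through, so IIRFilter(2).process(0.5) == 0.5.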
| 419 | 0 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
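# Quick check: rabin_karp("abra", "abracadabra") is True,
# rabin_karp("abrax", "abracadabra") is False.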
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 675 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 1 |
"""simple docstring"""
import cva
import numpy as np
class a__ :
def __init__( self :Optional[int] , _lowerCamelCase :Dict , _lowerCamelCase :int ):
'''simple docstring'''
if k in (0.04, 0.06):
UpperCamelCase_ : Optional[Any] =k
UpperCamelCase_ : Optional[Any] =window_size
else:
raise ValueError('invalid k value' )
def __str__( self :Tuple ):
'''simple docstring'''
return str(self.k )
def lowerCamelCase_ ( self :str , _lowerCamelCase :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =cva.imread(lowercase_ , 0 )
UpperCamelCase_ , UpperCamelCase_ : List[Any] =img.shape
UpperCamelCase_ : Union[str, Any] =[]
UpperCamelCase_ : Union[str, Any] =img.copy()
UpperCamelCase_ : Any =cva.cvtColor(lowercase_ , cva.COLOR_GRAY2RGB )
UpperCamelCase_ , UpperCamelCase_ : Dict =np.gradient(lowercase_ )
UpperCamelCase_ : Any =dx**2
UpperCamelCase_ : Tuple =dy**2
UpperCamelCase_ : Union[str, Any] =dx * dy
UpperCamelCase_ : List[str] =0.04
UpperCamelCase_ : Any =self.window_size // 2
for y in range(lowercase_ , h - offset ):
for x in range(lowercase_ , w - offset ):
UpperCamelCase_ : List[str] =ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_ : List[str] =iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_ : List[Any] =ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_ : Optional[int] =(wxx * wyy) - (wxy**2)
UpperCamelCase_ : List[Any] =wxx + wyy
UpperCamelCase_ : int =det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 357 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        text_target = kwargs.pop('text_target', None)
        audio_target = kwargs.pop('audio_target', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets['input_ids']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop('input_values', None)
        input_ids = kwargs.pop('input_ids', None)
        labels = kwargs.pop('labels', None)
        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.')
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets['input_ids']
            else:
                # Temporarily swap the feature size so the mel-spectrogram labels pad correctly.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
| 313 | 0 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact Gaussian Error Linear Unit, computed with the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU, as used in the original BERT/GPT code."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Faster tanh-based GELU approximation with a precomputed sqrt(2 / pi)."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)  # sqrt(2 / pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to the range [-10, 10], useful for quantization."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: splits `x` in two along `axis` and gates one half."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Look up an activation function by name, raising on unknown names."""
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
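
# A quick sanity-check sketch, assuming TensorFlow is installed: the
# approximations should stay close to the exact GELU on a small input range.
if __name__ == "__main__":
    xs = tf.linspace(-3.0, 3.0, 7)
    print(get_tf_activation("gelu")(xs).numpy())
    print(get_tf_activation("gelu_new")(xs).numpy())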
| 705 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` by recursively rotating the head element."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` using in-place swaps and backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data produced by the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
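
# A small consistency sketch: both implementations should produce the same set
# of permutations (order may differ), n! of them in total.
def _check_permutations() -> None:
    from itertools import permutations as it_permutations

    expected = set(it_permutations([1, 2, 3]))
    assert {tuple(p) for p in permute([1, 2, 3])} == expected
    assert {tuple(p) for p in permute2([1, 2, 3])} == expected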
| 243 | 0 |
import copy
import re


class TrialShortNamer:
    """Derives short, reversible run names from hyperparameter dictionaries."""

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback: append a pseudo-alphabetic suffix until unique.
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the suffix, otherwise a collision loops forever
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separator-less short name, but if there is a collision we have to fall back
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
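
# A minimal usage sketch for TrialShortNamer: derive a compact run name from
# hyperparameters and parse it back. The parameter names are illustrative.
if __name__ == "__main__":
    class RunNamer(TrialShortNamer):
        PREFIX = "run"
        DEFAULTS = {"learning_rate": 1e-3, "num_epochs": 3}

    name = RunNamer.shortname({"learning_rate": 1e-4, "num_epochs": 3})
    print(name)                       # "run_lr0.0001" (epochs stay at the default)
    print(RunNamer.parse_repr(name))  # recovers the full parameter dict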
| 538 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer config
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor config
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
        )
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

            new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
            # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
            self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
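
# These tests are meant to run inside a transformers development checkout (the
# `test_module` fixtures live in the repo) with network access. The test-file
# path below is an assumption about where this module sits:
#
#     python -m pytest tests/models/auto/test_processor_auto.py -k processor -x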
| 538 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract the warnings listed in `targets` from a downloaded artifact (a zip file or a directory)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files found in `artifact_dir`."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
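
# Typical invocation sketch (the script file name is an assumption; the run id
# and token are placeholders). The token needs `actions:read` permission:
#
#     python extract_warnings.py --workflow_run_id 123456789 \
#         --output_dir ./ci_warnings --token "$GITHUB_TOKEN"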
| 704 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
def convert_command_factory(args: Namespace):
    """Factory used to instantiate the convert command from provided CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the datasets-cli."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
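
# Typical CLI invocation once the command is registered with datasets-cli
# (both paths below are placeholders):
#
#     datasets-cli convert --tfds_path ./tfds_datasets/my_dataset.py --datasets_directory ./hf_datasets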
| 335 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
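
# With the lazy module in place, importing from this package defers the heavy
# torch-backed import until a model attribute is actually touched, e.g.:
#
#     from transformers.models.mra import MraConfig  # cheap: config only
#     from transformers.models.mra import MraModel   # triggers the torch import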
| 109 |
"""simple docstring"""
import numpy as np
def A_ ( snake_case_ : Tuple ,snake_case_ : Any ,snake_case_ : str ,snake_case_ : Optional[int] ,snake_case_ : List[str] ):
'''simple docstring'''
UpperCamelCase : int = int(np.ceil((x_end - xa) / h ) )
UpperCamelCase : Dict = np.zeros((n + 1,) )
UpperCamelCase : Optional[int] = ya
UpperCamelCase : Optional[Any] = xa
for k in range(snake_case_ ):
UpperCamelCase : Optional[Any] = f(snake_case_ ,y[k] )
UpperCamelCase : Optional[Any] = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
UpperCamelCase : Optional[Any] = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
UpperCamelCase : Optional[int] = f(x + h ,y[k] + h * ka )
UpperCamelCase : Tuple = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
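
# A quick worked example for the solver above: integrate dy/dx = y with
# y(0) = 1 over [0, 1]; the final value should approach e as h shrinks.
def _demo_runge_kutta() -> None:
    y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(y[-1])  # ~2.7182, close to the exact answer e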
| 499 | 0 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n    title={CRC standard probability and statistics tables and formulae},\n    author={Kokoska, Stephen and Zwillinger, Daniel},\n    year={2000},\n    publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n    author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n               Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n               Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n               Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n               Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n               Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n               Kern, Robert and Larson, Eric and Carey, C J and\n               Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n               {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n               Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n               Harris, Charles R. and Archibald, Anne M. and\n               Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n               {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n    title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n               Computing in Python}},\n    journal = {Nature Methods},\n    year    = {2020},\n    volume  = {17},\n    pages   = {261--272},\n    adsurl  = {https://rdcu.be/b08Wh},\n    doi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 122 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 122 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ELECTRA tokenizer backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        # Re-create the backend normalizer if the requested options differ from
        # the ones serialized in the tokenizer file.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
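
# Usage sketch (the checkpoint name comes from the pretrained map above):
#
#     tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     enc = tok("electra is efficient", "really?")
#     print(enc["input_ids"], enc["token_type_ids"])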
| 426 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class a :
"""simple docstring"""
def __init__( self , snake_case_ ) -> Union[str, Any]:
_UpperCAmelCase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_UpperCAmelCase = len(snake_case_ ) - 1
def __A ( self , snake_case_ ) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_UpperCAmelCase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , snake_case_ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(snake_case_ ) , 5 ) == 1
return output_values
def __A ( self , snake_case_ ) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_UpperCAmelCase = self.basis_function(snake_case_ )
_UpperCAmelCase = 0.0
_UpperCAmelCase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __A ( self , snake_case_ = 0.01 ) -> int:
from matplotlib import pyplot as plt # type: ignore
_UpperCAmelCase = [] # x coordinates of points to plot
_UpperCAmelCase = [] # y coordinates of points to plot
_UpperCAmelCase = 0.0
while t <= 1:
_UpperCAmelCase = self.bezier_curve_function(snake_case_ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_UpperCAmelCase = [i[0] for i in self.list_of_points]
_UpperCAmelCase = [i[1] for i in self.list_of_points]
plt.plot(
snake_case_ , snake_case_ , color="blue" , label="Curve of Degree " + str(self.degree ) , )
plt.scatter(snake_case_ , snake_case_ , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 426 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 703 | 
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SEW model."""

    model_type = "sew"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
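
# Usage sketch: instantiate a default config or load the published one listed
# in the archive map above.
#
#     config = SEWConfig()                                     # library defaults
#     config = SEWConfig.from_pretrained("asapp/sew-tiny-100k")
#     print(config.inputs_to_logits_ratio)  # product of conv strides, 320 here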
| 635 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author    = {Banerjee, Satanjeev  and  Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month     = jun,\n  year      = {2005},\n  address   = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url       = {https://www.aclweb.org/anthology/W05-0909},\n  pages     = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        # Newer NLTK versions expect pre-tokenized input.
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 404 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
lowerCAmelCase_ = (UnCLIPScheduler,)
def UpperCamelCase__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict ) -> Optional[Any]:
_UpperCamelCase ={
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**UpperCamelCase__ )
return config
def UpperCamelCase__ ( self : str ) -> Tuple:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[str] ) -> int:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[str] ) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[Any] ) -> List[str]:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[Any] ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def UpperCamelCase__ ( self : Optional[Any] ) -> Tuple:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[str] ) -> Dict:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config(variance_type='''fixed_small_log''' )
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1E-5
def UpperCamelCase__ ( self : Any ) -> Tuple:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config(variance_type='''learned_range''' )
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
_UpperCamelCase =0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -10.1712790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=UpperCamelCase__ ) - -5.7998052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=UpperCamelCase__ ) - -0.0010011 < 1E-5
def UpperCamelCase__ ( self : str ) -> Optional[int]:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config()
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
_UpperCamelCase =scheduler.timesteps
_UpperCamelCase =self.dummy_model()
_UpperCamelCase =self.dummy_sample_deter
_UpperCamelCase =torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
_UpperCamelCase =model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
_UpperCamelCase =scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
_UpperCamelCase =pred_prev_sample
_UpperCamelCase =torch.sum(torch.abs(UpperCamelCase__ ) )
_UpperCamelCase =torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 252.2682495 ) < 1E-2
assert abs(result_mean.item() - 0.3284743 ) < 1E-3
def UpperCamelCase__ ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config()
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(25 )
_UpperCamelCase =scheduler.timesteps
_UpperCamelCase =self.dummy_model()
_UpperCamelCase =self.dummy_sample_deter
_UpperCamelCase =torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
_UpperCamelCase =model(UpperCamelCase__ , UpperCamelCase__ )
if i + 1 == timesteps.shape[0]:
_UpperCamelCase =None
else:
_UpperCamelCase =timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_UpperCamelCase =scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prev_timestep=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
_UpperCamelCase =pred_prev_sample
_UpperCamelCase =torch.sum(torch.abs(UpperCamelCase__ ) )
_UpperCamelCase =torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 258.2044983 ) < 1E-2
assert abs(result_mean.item() - 0.3362038 ) < 1E-3
def UpperCamelCase__ ( self : str ) -> Any:
pass
def UpperCamelCase__ ( self : List[str] ) -> str:
pass
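# A hedged sketch of the denoising loop that the two full-loop tests above
# exercise, assuming diffusers' UnCLIPScheduler API (step(...) returns an
# object with .prev_sample). The zero "noise prediction" is a stand-in for a
# real UNet call.
import torch
from diffusers import UnCLIPScheduler

def unclip_loop_sketch():
    scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type='''fixed_small_log''')
    scheduler.set_timesteps(25)
    sample = torch.randn(1, 3, 32, 32)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
    return sample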
| 404 | 1 |
def snake_case ( UpperCAmelCase : int ):
A = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def snake_case ( UpperCAmelCase : int = 1_00 ):
A = 1
A = 2
for i in range(2, max_n + 1 ):
A = pre_numerator
A = 2 * i // 3 if i % 3 == 0 else 1
A = cur_numerator
A = e_cont * pre_numerator + temp
return sum_digits(UpperCAmelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
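# A hedged worked check of the recurrence above: the continued fraction of e is
# [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], and `2 * i // 3 if i % 3 == 0 else 1`
# reproduces those coefficients for i = 2, 3, 4, ... The convergent numerators
# grow as 2, 3, 8, 11, 19, 87, 106, 193, 1264, 1457, so with max_n = 10 the
# tenth convergent's numerator is 1457 and the returned digit sum is
# 1 + 4 + 5 + 7 = 17.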
| 110 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
lowerCAmelCase_ = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def snake_case ( UpperCAmelCase : str = "mumbai" ):
A = BeautifulSoup(requests.get(url + location ).content, 'html.parser' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('div', attrs={'data-tn-component': 'organicJob'} ):
A = job.find('a', attrs={'data-tn-element': 'jobTitle'} ).text.strip()
A = job.find('span', {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
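# A hedged, offline check of the selectors used above against a tiny
# hand-written HTML fragment (the real Indeed markup may differ or change at
# any time, so treat this only as a shape test for the parsing logic):
def _selector_demo() -> None:
    html = (
        '<div data-tn-component="organicJob">'
        '<a data-tn-element="jobTitle"> Android Developer </a>'
        '<span class="company"> Acme Corp </span></div>'
    )
    soup = BeautifulSoup(html, 'html.parser')
    job = soup.find('div', attrs={'data-tn-component': 'organicJob'})
    assert job.find('a', attrs={'data-tn-element': 'jobTitle'}).text.strip() == 'Android Developer'
    assert job.find('span', {'class': 'company'}).text.strip() == 'Acme Corp'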
| 110 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
lowerCAmelCase__ : List[str] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : str = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
lowerCAmelCase__ : int = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_6000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
lowerCAmelCase__ : str = tempfile.mkdtemp()
lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ : Tuple = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
# load decoder from hub
lowerCAmelCase__ : Optional[Any] = '''hf-internal-testing/ngram-beam-search-decoder'''
def __magic_name__( self , **__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__UpperCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , **__UpperCAmelCase ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__( self , **__UpperCAmelCase ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__UpperCAmelCase )
def __magic_name__( self ):
shutil.rmtree(self.tmpdirname )
def __magic_name__( self ):
lowerCAmelCase__ : List[str] = self.get_tokenizer()
lowerCAmelCase__ : Dict = self.get_feature_extractor()
lowerCAmelCase__ : Union[str, Any] = self.get_decoder()
lowerCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : str = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__UpperCAmelCase , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__UpperCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[Any] = self.get_feature_extractor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : str = self.get_decoder()
lowerCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = floats_list((3, 1000) )
lowerCAmelCase__ : Tuple = feature_extractor(__UpperCAmelCase , return_tensors='''np''' )
lowerCAmelCase__ : Tuple = processor(__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__( self ):
lowerCAmelCase__ : str = self.get_feature_extractor()
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : str = self.get_decoder()
lowerCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = '''This is a test string'''
lowerCAmelCase__ : Optional[Any] = processor(text=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__( self , __UpperCAmelCase=(2, 10, 16) , __UpperCAmelCase=77 ):
np.random.seed(__UpperCAmelCase )
return np.random.rand(*__UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Any = self.get_feature_extractor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : int = self.get_decoder()
lowerCAmelCase__ : List[str] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : Any = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowerCAmelCase__ : Any = processor.decode(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = decoder.decode_beams(__UpperCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = self.get_feature_extractor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : List[str] = self.get_decoder()
lowerCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : str = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCAmelCase__ : Optional[Any] = processor.batch_decode(__UpperCAmelCase )
else:
with get_context(__UpperCAmelCase ).Pool() as pool:
lowerCAmelCase__ : Dict = processor.batch_decode(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = list(__UpperCAmelCase )
with get_context('''fork''' ).Pool() as p:
lowerCAmelCase__ : List[Any] = decoder.decode_beams_batch(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__UpperCAmelCase , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__UpperCAmelCase , decoded_processor.logit_score )
self.assertListEqual(__UpperCAmelCase , decoded_processor.lm_score )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.get_feature_extractor()
lowerCAmelCase__ : int = self.get_tokenizer()
lowerCAmelCase__ : List[Any] = self.get_decoder()
lowerCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = self._get_dummy_logits()
lowerCAmelCase__ : Tuple = 15
lowerCAmelCase__ : str = -20.0
lowerCAmelCase__ : Tuple = -4.0
lowerCAmelCase__ : Dict = processor.batch_decode(
__UpperCAmelCase , beam_width=__UpperCAmelCase , beam_prune_logp=__UpperCAmelCase , token_min_logp=__UpperCAmelCase , )
lowerCAmelCase__ : List[Any] = decoded_processor_out.text
lowerCAmelCase__ : Dict = list(__UpperCAmelCase )
with get_context('''fork''' ).Pool() as pool:
lowerCAmelCase__ : List[Any] = decoder.decode_beams_batch(
__UpperCAmelCase , __UpperCAmelCase , beam_width=__UpperCAmelCase , beam_prune_logp=__UpperCAmelCase , token_min_logp=__UpperCAmelCase , )
lowerCAmelCase__ : Any = [d[0][0] for d in decoded_decoder_out]
lowerCAmelCase__ : str = [d[0][2] for d in decoded_decoder_out]
lowerCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __UpperCAmelCase )
self.assertTrue(np.array_equal(__UpperCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __UpperCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(__UpperCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __UpperCAmelCase , atol=1e-3 ) )
def __magic_name__( self ):
lowerCAmelCase__ : int = self.get_feature_extractor()
lowerCAmelCase__ : List[Any] = self.get_tokenizer()
lowerCAmelCase__ : Optional[int] = self.get_decoder()
lowerCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = self._get_dummy_logits()
lowerCAmelCase__ : Optional[Any] = 2.0
lowerCAmelCase__ : List[Any] = 5.0
lowerCAmelCase__ : Tuple = -20.0
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : int = processor.batch_decode(
__UpperCAmelCase , alpha=__UpperCAmelCase , beta=__UpperCAmelCase , unk_score_offset=__UpperCAmelCase , lm_score_boundary=__UpperCAmelCase , )
lowerCAmelCase__ : Any = decoded_processor_out.text
lowerCAmelCase__ : int = list(__UpperCAmelCase )
decoder.reset_params(
alpha=__UpperCAmelCase , beta=__UpperCAmelCase , unk_score_offset=__UpperCAmelCase , lm_score_boundary=__UpperCAmelCase , )
with get_context('''fork''' ).Pool() as pool:
lowerCAmelCase__ : Union[str, Any] = decoder.decode_beams_batch(
__UpperCAmelCase , __UpperCAmelCase , )
lowerCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __UpperCAmelCase )
lowerCAmelCase__ : int = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
lowerCAmelCase__ : List[str] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCAmelCase__ : Optional[int] = os.listdir(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
lowerCAmelCase__ : Optional[Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCAmelCase__ : List[Any] = os.listdir(__UpperCAmelCase )
lowerCAmelCase__ : Dict = os.listdir(__UpperCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : Union[str, Any] = floats_list((3, 1000) )
lowerCAmelCase__ : Optional[int] = processor_wavaveca(__UpperCAmelCase , return_tensors='''np''' )
lowerCAmelCase__ : int = processor_auto(__UpperCAmelCase , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
lowerCAmelCase__ : Union[str, Any] = self._get_dummy_logits()
lowerCAmelCase__ : List[str] = processor_wavaveca.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = processor_auto.batch_decode(__UpperCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __magic_name__( self ):
lowerCAmelCase__ : List[Any] = self.get_feature_extractor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : Any = self.get_decoder()
lowerCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Tuple = [d[key] for d in offsets]
return retrieved_list
def __magic_name__( self ):
lowerCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : Any = self._get_dummy_logits()[0]
lowerCAmelCase__ : Tuple = processor.decode(__UpperCAmelCase , output_word_offsets=__UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __magic_name__( self ):
lowerCAmelCase__ : int = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase__ : Optional[int] = self._get_dummy_logits()
lowerCAmelCase__ : Dict = processor.batch_decode(__UpperCAmelCase , output_word_offsets=__UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__UpperCAmelCase , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __magic_name__( self ):
import torch
lowerCAmelCase__ : Tuple = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__UpperCAmelCase )
lowerCAmelCase__ : Tuple = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6000 ) )
lowerCAmelCase__ : Dict = iter(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = next(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
lowerCAmelCase__ : str = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ).logits.cpu().numpy()
lowerCAmelCase__ : Optional[int] = processor.decode(logits[0] , output_word_offsets=__UpperCAmelCase )
lowerCAmelCase__ : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCAmelCase__ : Union[str, Any] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowerCAmelCase__ : List[Any] = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__UpperCAmelCase , '''word''' ) ) , __UpperCAmelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(__UpperCAmelCase , '''word''' ) ) , output.text )
# output times
lowerCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(__UpperCAmelCase , '''start_time''' ) )
lowerCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(__UpperCAmelCase , '''end_time''' ) )
# fmt: off
lowerCAmelCase__ : Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
lowerCAmelCase__ : List[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=0.01 ) )
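# A hedged sketch of the batched LM decoding pattern the pool tests above rely
# on, assuming pyctcdecode's BeamSearchDecoderCTC API. As noted in the tests,
# the pool must be created *after* the decoder exists so the language model is
# visible to forked sub-processes.
from multiprocessing import get_context

def decode_batch_sketch(decoder, logits_list):
    with get_context('''fork''').Pool() as pool:
        beams = decoder.decode_beams_batch(pool, list(logits_list))
    return [b[0][0] for b in beams]  # best hypothesis text per utterance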
| 678 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
A__ = MODEL_FOR_CAUSAL_LM_MAPPING
A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
lowerCAmelCase__ : List[Any] = '''<pad>'''
lowerCAmelCase__ : List[Any] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
self.assertEqual(
__UpperCAmelCase , [
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
[
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
{'''generated_token_ids''': ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __magic_name__( self ):
lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def __magic_name__( self ):
lowerCAmelCase__ : Any = '''Hello I believe in'''
lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator.model
lowerCAmelCase__ : Optional[int] = text_generator.tokenizer
lowerCAmelCase__ : Tuple = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
lowerCAmelCase__ : Dict = text_generator('''This is a test''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ : List[str] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
[{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
] , )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ : str = text_generator('''''' )
self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ : List[str] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__UpperCAmelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
# Classic `model_kwargs`
lowerCAmelCase__ : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ : Any = pipe('''This is a test''' )
self.assertEqual(
__UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __magic_name__( self ):
import torch
lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 )
def __magic_name__( self ):
lowerCAmelCase__ : int = '''Hello world'''
lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' )
        lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(__UpperCAmelCase , cl.out )
with CaptureLogger(__UpperCAmelCase ) as cl:
lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 )
self.assertNotIn(__UpperCAmelCase , cl.out )
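# A minimal, hedged usage sketch of the pipeline under test, mirroring the tiny
# checkpoint used above; do_sample=False keeps the output deterministic.
from transformers import pipeline

def generation_sketch():
    generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''')
    out = generator('''Hello I believe in''' , do_sample=False , max_new_tokens=5)
    return out[0]['''generated_text''']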
| 678 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(lowerCamelCase_ ) )]
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
SCREAMING_SNAKE_CASE : Tuple = all_rotations(lowerCamelCase_ )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
SCREAMING_SNAKE_CASE : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(lowerCamelCase_ ),
}
return response
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
SCREAMING_SNAKE_CASE : List[Any] = int(lowerCamelCase_ )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or passive"""
""" of cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(lowerCamelCase_ ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
SCREAMING_SNAKE_CASE : List[str] = [""""""] * len(lowerCamelCase_ )
for _ in range(len(lowerCamelCase_ ) ):
for i in range(len(lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__UpperCAmelCase = """Provide a string that I will generate its BWT transform: """
__UpperCAmelCase = input(entry_msg).strip()
__UpperCAmelCase = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
__UpperCAmelCase = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
)
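# A hedged worked check of the round trip above, using the upstream names
# bwt_transform / reverse_bwt referenced in the __main__ block ("^BANANA" is
# just a convenient demo string; note that "^" sorts after the uppercase
# letters in ASCII, so the last column of the sorted rotations is "BNN^AAA"):
def _bwt_round_trip_demo() -> None:
    result = bwt_transform("""^BANANA""")
    assert result["""bwt_string"""] == """BNN^AAA"""
    assert result["""idx_original_string"""] == 6
    assert reverse_bwt("""BNN^AAA""", 6) == """^BANANA"""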
| 713 |
'''simple docstring'''
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : List[str] = number
while duplicate > 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = divmod(lowerCamelCase_ , 10 )
fact_sum += factorial(lowerCamelCase_ )
return fact_sum == number
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
__UpperCAmelCase = int(input("""Enter number: """).strip())
print(
f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
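# A hedged worked check of the predicate above: 145 is a Krishnamurthy number
# because 1! + 4! + 5! = 1 + 24 + 120 = 145; the only other base-10 examples
# are 1, 2 and 40585.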
| 79 | 0 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
a__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(SCREAMING_SNAKE_CASE__ ):
return ext
raise Exception(
F'''Unable to determine file format from file extension {path}. '''
F'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
def lowercase ( SCREAMING_SNAKE_CASE__ : Tuple ) -> int:
_snake_case : str = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_snake_case : Optional[Any] = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
_snake_case : Any = PipelineDataFormat.from_str(
format=SCREAMING_SNAKE_CASE__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : Pipeline , lowerCAmelCase : PipelineDataFormat) -> Dict:
"""simple docstring"""
_snake_case : int = nlp
_snake_case : Dict = reader
@staticmethod
def UpperCamelCase_ ( lowerCAmelCase : ArgumentParser) -> Any:
"""simple docstring"""
_snake_case : Any = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""")
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""")
run_parser.add_argument("""--input""" , type=lowerCAmelCase , help="""Path to the file to use for inference""")
run_parser.add_argument("""--output""" , type=lowerCAmelCase , help="""Path to the file that will be used post to write results.""")
run_parser.add_argument("""--model""" , type=lowerCAmelCase , help="""Name or path to the model to instantiate.""")
run_parser.add_argument("""--config""" , type=lowerCAmelCase , help="""Name or path to the model's config to instantiate.""")
run_parser.add_argument(
"""--tokenizer""" , type=lowerCAmelCase , help="""Name of the tokenizer to use. (default: same as the model name)""")
run_parser.add_argument(
"""--column""" , type=lowerCAmelCase , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=lowerCAmelCase , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=lowerCAmelCase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""")
run_parser.set_defaults(func=lowerCAmelCase)
def UpperCamelCase_ ( self : Optional[int]) -> Tuple:
"""simple docstring"""
_snake_case , _snake_case : int = self._nlp, []
for entry in self._reader:
_snake_case : List[Any] = nlp(**lowerCAmelCase) if self._reader.is_multi_columns else nlp(lowerCAmelCase)
if isinstance(lowerCAmelCase , lowerCAmelCase):
outputs.append(lowerCAmelCase)
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_snake_case : Any = self._reader.save_binary(lowerCAmelCase)
logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''')
else:
self._reader.save(lowerCAmelCase)
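# A hedged example invocation of the subcommand registered above (the flags
# mirror the add_argument calls; the task and file names are made up):
#
#   transformers-cli run --task text-classification \
#       --input reviews.csv --column text --format csv \
#       --output predictions.txt --device -1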
| 477 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
a__ = logging.get_logger(__name__)
a__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a__ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
a__ = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
a__ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = VOCAB_FILES_NAMES
snake_case_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : Any = PRETRAINED_INIT_CONFIGURATION
snake_case_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ : Dict = RealmTokenizer
def __init__( self : int , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Union[str, Any]="[UNK]" , lowerCAmelCase : List[str]="[SEP]" , lowerCAmelCase : Optional[int]="[PAD]" , lowerCAmelCase : List[Any]="[CLS]" , lowerCAmelCase : Any="[MASK]" , lowerCAmelCase : Dict=True , lowerCAmelCase : int=None , **lowerCAmelCase : Tuple , ) -> List[str]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
_snake_case : str = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase) != tokenize_chinese_chars
):
_snake_case : Tuple = getattr(lowerCAmelCase , normalizer_state.pop("""type"""))
_snake_case : Any = do_lower_case
_snake_case : Optional[int] = strip_accents
_snake_case : str = tokenize_chinese_chars
_snake_case : List[str] = normalizer_class(**lowerCAmelCase)
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : Dict , lowerCAmelCase : List[str] , **lowerCAmelCase : Tuple) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Dict = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[Any] = kwargs.pop("""text_pair""" , lowerCAmelCase)
_snake_case : Union[str, Any] = kwargs.pop("""return_tensors""" , lowerCAmelCase)
_snake_case : Optional[Any] = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(lowerCAmelCase):
if batch_text_pair is not None:
_snake_case : Dict = batch_text_pair[idx]
else:
_snake_case : List[str] = None
_snake_case : Optional[int] = super().__call__(lowerCAmelCase , lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
_snake_case : str = encoded_candidates.get("""input_ids""")
_snake_case : Union[str, Any] = encoded_candidates.get("""attention_mask""")
_snake_case : Any = encoded_candidates.get("""token_type_ids""")
if encoded_input_ids is not None:
output_data["input_ids"].append(lowerCAmelCase)
if encoded_attention_mask is not None:
output_data["attention_mask"].append(lowerCAmelCase)
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(lowerCAmelCase)
_snake_case : str = {key: item for key, item in output_data.items() if len(lowerCAmelCase) != 0}
return BatchEncoding(lowerCAmelCase , tensor_type=lowerCAmelCase)
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any]=None) -> List[str]:
"""simple docstring"""
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_snake_case : List[Any] = [self.sep_token_id]
_snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
_snake_case : Dict = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
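# A hedged usage sketch of the candidate-batching __call__ above, assuming the
# upstream name RealmTokenizerFast: every returned field is padded to
# max_length, with shape (num_examples, num_candidates, max_length).
from transformers import RealmTokenizerFast

def realm_candidates_sketch():
    tokenizer = RealmTokenizerFast.from_pretrained("""google/realm-cc-news-pretrained-encoder""")
    candidates = [["""Hello world!""", """Nice to meet you!"""], ["""The cube is green""", """Hi there"""]]
    batch = tokenizer.batch_encode_candidates(candidates , max_length=10 , return_tensors="""pt""")
    return batch["""input_ids"""].shape  # torch.Size([2, 2, 10])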
| 477 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
UpperCAmelCase_ : List[Any] = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class __A ( snake_case_ ):
UpperCamelCase = """albert"""
def __init__( self :Optional[Any] , __snake_case :Optional[Any]=3_00_00 , __snake_case :Dict=1_28 , __snake_case :Union[str, Any]=40_96 , __snake_case :List[Any]=12 , __snake_case :int=1 , __snake_case :str=64 , __snake_case :Any=1_63_84 , __snake_case :str=1 , __snake_case :List[str]="gelu_new" , __snake_case :Optional[int]=0 , __snake_case :Dict=0 , __snake_case :Optional[int]=5_12 , __snake_case :Any=2 , __snake_case :Optional[Any]=0.02 , __snake_case :Optional[Any]=1E-12 , __snake_case :Optional[int]=0.1 , __snake_case :Tuple="absolute" , __snake_case :Union[str, Any]=0 , __snake_case :Tuple=2 , __snake_case :int=3 , **__snake_case :List[str] , ):
'''simple docstring'''
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__magic_name__ : Optional[int] =vocab_size
__magic_name__ : Dict =embedding_size
__magic_name__ : Any =hidden_size
__magic_name__ : str =num_hidden_layers
__magic_name__ : Dict =num_hidden_groups
__magic_name__ : Dict =num_attention_heads
__magic_name__ : Optional[Any] =inner_group_num
__magic_name__ : Union[str, Any] =hidden_act
__magic_name__ : str =intermediate_size
__magic_name__ : Optional[Any] =hidden_dropout_prob
__magic_name__ : List[str] =attention_probs_dropout_prob
__magic_name__ : Any =max_position_embeddings
__magic_name__ : int =type_vocab_size
__magic_name__ : Optional[int] =initializer_range
__magic_name__ : str =layer_norm_eps
__magic_name__ : Tuple =classifier_dropout_prob
__magic_name__ : List[str] =position_embedding_type
class __A ( snake_case_ ):
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__magic_name__ : List[Any] ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__ : Tuple ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
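# A hedged usage sketch, assuming the upstream name AlbertConfig for the first
# (renamed) class above; the reduced sizes are arbitrary demo values, not a
# released checkpoint.
from transformers import AlbertConfig

def albert_config_sketch():
    config = AlbertConfig(
        vocab_size=3_00_00 , embedding_size=1_28 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 )
    return config.model_type  # "albert"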
| 701 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __A ( UpperCamelCase__ ):
UpperCamelCase = """Speech2TextFeatureExtractor"""
UpperCamelCase = """Speech2TextTokenizer"""
def __init__( self :Any , __snake_case :int , __snake_case :Any ):
'''simple docstring'''
super().__init__(__snake_case , __snake_case )
__magic_name__ : Optional[int] =self.feature_extractor
__magic_name__ : Any =False
def __call__( self :Tuple , *__snake_case :List[str] , **__snake_case :List[str] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
__magic_name__ : Tuple =kwargs.pop("""raw_speech""" )
else:
__magic_name__ : List[Any] =kwargs.pop("""audio""" , __snake_case )
__magic_name__ : Union[str, Any] =kwargs.pop("""sampling_rate""" , __snake_case )
__magic_name__ : Dict =kwargs.pop("""text""" , __snake_case )
if len(__snake_case ) > 0:
__magic_name__ : Union[str, Any] =args[0]
__magic_name__ : int =args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__magic_name__ : List[str] =self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if text is not None:
__magic_name__ : Any =self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__magic_name__ : List[str] =encodings["""input_ids"""]
return inputs
def A__ ( self :List[Any] , *__snake_case :str , **__snake_case :List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def A__ ( self :int , *__snake_case :int , **__snake_case :Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@contextmanager
def A__ ( self :int ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
__magic_name__ : int =True
__magic_name__ : List[Any] =self.tokenizer
yield
__magic_name__ : List[Any] =self.feature_extractor
__magic_name__ : str =False
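# A hedged usage sketch of the processor above, assuming the upstream name
# Speech2TextProcessor; per the __call__ logic, passing audio and text together
# also attaches the tokenized text as `labels` on the returned inputs.
import numpy as np
from transformers import Speech2TextProcessor

def sat_processor_sketch():
    processor = Speech2TextProcessor.from_pretrained("""facebook/s2t-small-librispeech-asr""")
    speech = np.zeros(16000 , dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(audio=speech , sampling_rate=16000 , text="""a transcript""" , return_tensors="""pt""")
    return inputs["""input_features"""].shape , inputs["""labels"""].shape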
| 367 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> dict:
    # Build a reversible byte -> printable-unicode mapping so BPE never sees
    # whitespace or control characters.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word: tuple) -> set:
    # Return the set of adjacent symbol pairs in a word (a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
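A tiny demonstration of the two helpers above:

# get_pairs works on a tuple of symbols; bytes_to_unicode covers all 256 byte values.
print(get_pairs(tuple("hello")))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
print(len(bytes_to_unicode()))    # 256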
class LEDTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder)

    def get_vocab(self) -> dict:
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """simple docstring"""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text: str, is_split_into_words=False, **kwargs) -> Tuple[str, dict]:
        """simple docstring"""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """simple docstring"""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
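A short usage sketch of the padding behavior above; `allenai/led-base-16384` is the checkpoint already referenced in this file's PRETRAINED_VOCAB_FILES_MAP.

# Minimal sketch: `tokenizer.pad` routes through `_pad`, which extends
# global_attention_mask with -1 so it tracks the padded input_ids length.
tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tokenizer("a long document")
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
padded = tokenizer.pad(enc, padding="max_length", max_length=32)
print(len(padded["input_ids"]), len(padded["global_attention_mask"]))  # 32 32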
| 146 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """simple docstring"""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    """simple docstring"""

    @staticmethod
    def register_subcommand(parser) -> None:
        """simple docstring"""
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model, cache, force, trust_remote_code) -> None:
        """simple docstring"""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self) -> None:
        """simple docstring"""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
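Usage sketch: once registered on the `transformers-cli` entry point, the command above is invoked from the shell; the model name is illustrative.

# Hypothetical shell invocation:
#   transformers-cli download --cache-dir /tmp/models --force bert-base-uncased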
| 268 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 117 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v: Union[str, bool]) -> bool:
    """simple docstring"""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).")


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """simple docstring"""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """simple docstring"""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs) -> None:
        '''simple docstring'''
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser, field) -> None:
        '''simple docstring'''
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default")

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'.")
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype) -> None:
        '''simple docstring'''
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)")
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`.") from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: dict, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
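A short usage sketch for the parser above; the dataclass and flags are illustrative.

# Hypothetical usage sketch for HfArgumentParser.
from dataclasses import dataclass


@dataclass
class TrainingArgs:
    learning_rate: float = HfArg(help="Peak learning rate", default=3e-5)
    do_eval: bool = False


parser = HfArgumentParser(TrainingArgs)
(train_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--do_eval"])
print(train_args.learning_rate, train_args.do_eval)  # 0.0001 True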
| 117 | 1 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    '''simple docstring'''
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    '''simple docstring'''
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'{solution() = }')
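Two quick numeric checks of `check_partition_perfect` under the formula above: for 2, sqrt(9)/2 + 1/2 equals 2, a perfect power of two; for 10 the value is about 3.70, whose base-2 logarithm (about 1.89) is not an integer.

print(check_partition_perfect(2))   # True:  sqrt(9)/2 + 1/2 == 2 == 2**1
print(check_partition_perfect(10))  # False: log2(~3.70) ~= 1.89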
| 393 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
__snake_case = Mock()
__snake_case = conn, Mock()
__snake_case = iter([1, None] )
__snake_case = lambda SCREAMING_SNAKE_CASE : next(SCREAMING_SNAKE_CASE )
# ===== invoke =====
send_file(filename="mytext.txt" , testing=SCREAMING_SNAKE_CASE )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 163 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    '''simple docstring'''

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    '''simple docstring'''

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    '''simple docstring'''

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, )

    def add_noise_to_input(self, state, sample, sigma, key, ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True, ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True, ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
| 710 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def next_number(number: int) -> int:
    '''simple docstring'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[57] = True   # 58 starts the chain that ends with 89
CHAINS[0] = False   # 1 starts the chain that ends with 1
def chain(number: int) -> bool:
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 1000_0000) -> int:
    '''simple docstring'''
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(True)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
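A worked trace of the chain logic above, under the convention fixed earlier (True marks the chain that reaches 89):

# 44 -> 32 -> 13 -> 10 -> 1, so chain(44) is False;
# 85 -> 89 (8**2 + 5**2 = 89), so chain(85) is True.
print(chain(44))  # False
print(chain(85))  # True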
| 9 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class lowerCamelCase__ (metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
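These placeholders follow the library's dummy-object pattern: any use fails fast with an install hint when the backend is missing. A behavior sketch, assuming flax is not installed:

# Hypothetical: instantiating any dummy class above raises immediately.
try:
    lowerCamelCase__()
except ImportError as err:
    print(err)  # explains that this object requires the flax backend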
| 134 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_DIR = get_tests_dir('''fixtures''')
class FeatureExtractorUtilTester(unittest.TestCase):
    """simple docstring"""

    def test_cached_files_are_used_when_internet_is_down(self) -> None:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''', return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self) -> None:
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''')
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    """simple docstring"""

    @classmethod
    def setUpClass(cls) -> None:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls) -> None:
        try:
            delete_repo(token=cls._token, repo_id='''test-feature-extractor''')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='''valid_org/test-feature-extractor-org''')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='''test-dynamic-feature-extractor''')
        except HTTPError:
            pass

    def test_push_to_hub(self) -> None:
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_DIR)
        feature_extractor.push_to_hub('''test-feature-extractor''', use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='''test-feature-extractor''')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='''test-feature-extractor''', push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self) -> None:
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_DIR)
        feature_extractor.push_to_hub('''valid_org/test-feature-extractor''', use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-feature-extractor''')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-feature-extractor-org''', push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self) -> None:
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_DIR)

        feature_extractor.push_to_hub('''test-dynamic-feature-extractor''', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''}, )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f'''{USER}/test-dynamic-feature-extractor''', trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, '''CustomFeatureExtractor''')
| 493 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        elif name.split('''.''')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    '''simple docstring'''
    with open(dict_path, '''r''', encoding='''utf-8''') as f:
        lines = f.readlines()
        words = [line.split(''' ''')[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    '''simple docstring'''
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])})
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove('''embed_out''')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, '''vocab.json'''), '''w''') as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, '''vocab.json'''))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''speech_to_text_2'''
    config['''feature_extractor_type'''] = '''wav2vec2'''

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
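For reference, a hypothetical invocation of the script above; all paths are placeholders.

# python convert_checkpoint.py \
#     --checkpoint_path /path/to/fairseq/checkpoint.pt \
#     --dict_path /path/to/dict.txt \
#     --pytorch_dump_folder_path /path/to/output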
| 710 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ):
'''simple docstring'''
inspect_dataset(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ = path + '''.py'''
assert script_name in os.listdir(_UpperCamelCase )
assert "__pycache__" not in os.listdir(_UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
inspect_metric(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ = path + '''.py'''
assert script_name in os.listdir(_UpperCamelCase )
assert "__pycache__" not in os.listdir(_UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int ):
'''simple docstring'''
UpperCAmelCase_ = get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
with pytest.raises(_UpperCamelCase ):
get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = get_dataset_config_names(_UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 43 | 0 |
import re


def split_input(str_: str) -> list[list[str]]:
    """Split the string on non-alphanumeric separator characters."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("""doctest""").testmod()
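
    # Added usage examples (the helper names split_input/to_simple_case/
    # to_complex_case are forced by the call sites above; the wrapper names are
    # the conventional ones for this kind of module):
    print(to_pascal_case("one two 31235three4four"))  # OneTwo31235three4four
    print(to_camel_case("one two 31235three4four"))  # oneTwo31235three4four
    print(to_snake_case("one two 31235three4four", True))  # ONE_TWO_31235THREE4FOUR
    print(to_kebab_case("one two 31235three4four", False))  # one-two-31235three4four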
| 203 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self) -> PerceiverTokenizer:
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect the ids of decodable single tokens and keep only plain ASCII letters/spaces
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no pretrained list to test
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary file
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
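
# Sanity note on the integration ids above (an added aside, not from the original
# test file): Perceiver tokenizes raw UTF-8 bytes with a fixed offset of 6 for the
# special tokens, so every expected id is just `byte + 6`, e.g.:
#
#     [ord(c) + 6 for c in "Unicode"]  # -> [91, 116, 111, 105, 117, 106, 107]
#     [b + 6 for b in "€".encode("utf-8")]  # -> [232, 136, 178]
#
# with [CLS] = 4 and [SEP] = 5 framing the sequence.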
| 203 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50_244,
        hidden_size=768,
        d_kv=64,
        d_ff=2_048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2_048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4_096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
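
# Illustrative composition of the three configs (added; this mirrors the standard
# transformers composite-config pattern used by the classes above):
#
#     text_config = Pix2StructTextConfig()
#     vision_config = Pix2StructVisionConfig()
#     config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#     # the sub-configs stay accessible, e.g. config.text_config.num_layers == 12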
| 721 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
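
# Minimal usage sketch distilled from the integration tests above (the checkpoint
# names are the ones exercised there; everything else is the standard API):
#
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits  # (batch, num_labels, height, width)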
| 111 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # computes the expected height and width for the shortest-edge resize rule
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
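
# Worked illustration of the shortest-edge rule checked by get_expected_values
# above (added for clarity; the numbers are an example, not from the tests):
def _expected_size_example() -> None:
    # for a 400x300 (w x h) image with shortest_edge=18, the short side (h) maps
    # to 18 and the long side keeps the aspect ratio: int(18 * 400 / 300) == 24
    shortest_edge = 18
    w, h = 400, 300
    expected_height, expected_width = shortest_edge, int(shortest_edge * w / h)
    assert (expected_height, expected_width) == (18, 24)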
| 230 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # `from_gh` is a module-level flag set in the __main__ block below
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(',')

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
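
    # Example invocation of this script (illustrative; the run id and token are
    # placeholders, not real values):
    #   python extract_warnings.py --workflow_run_id 1234567890 --output_dir ./artifacts \
    #       --token <GITHUB_TOKEN> --targets DeprecationWarning,UserWarning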
| 230 | 1 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(result) == sorted(expected)
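

# For reference, a minimal Kruskal implementation compatible with the call above
# (a sketch under the assumption that kruskal(num_nodes, edges) takes
# [u, v, weight] triples and returns the MST edges; the imported module itself
# is not shown in this file):
def kruskal_sketch(num_nodes: int, edges: list[list[int]]) -> list[list[int]]:
    parent = list(range(num_nodes))

    def find(x: int) -> int:
        # union-find root lookup with path halving
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # adding this edge creates no cycle
            parent[root_u] = root_v
            mst.append([u, v, weight])
    return mst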
| 358 |
'''simple docstring'''
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
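

# Added usage example: a 5-vertex adjacency matrix with a known Hamiltonian
# cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0, which is exactly what the backtracking
# search finds first.
def _hamilton_cycle_example() -> None:
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    assert hamilton_cycle(graph) == [0, 1, 2, 4, 3, 0]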
| 358 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(RF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 253 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a sign change over [a, b] guarantees a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError('''Wrong space!''')

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
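

# Added sanity check: the positive root of 10 - x * x is sqrt(10) ~= 3.1623, and
# the loop stops once the bracket [a, b] is narrower than 0.01, so the returned
# midpoint lies within that tolerance of the true root.
def _check_bisection_against_sqrt10() -> None:
    import math

    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01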
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 253 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCamelCase :
"""simple docstring"""
pass
| 707 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    args = checkpoint['args']
    state_dict = checkpoint['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='relu',
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f' but all the following weights are missing {missing}')

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
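
    # Example invocation (illustrative; the script filename and checkpoint path
    # are placeholders):
    #   python convert_s2t_fairseq_to_transformers.py \
    #       --fairseq_path s2t_checkpoint.pt --pytorch_dump_folder_path ./s2t-model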
| 110 | 0 |
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
doctest.testmod()
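
    # Added equivalence check: both implementations should agree on [1, 2, 3],
    # producing all 3! = 6 orderings (compared order-insensitively).
    assert sorted(map(tuple, permute([1, 2, 3]))) == sorted(map(tuple, permute2([1, 2, 3])))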
| 382 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string (`target`) can be constructed from
    the given list of substrings (`word_bank`)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
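
    # Added worked check: for "aa" with bank ["a", "aa"], the DP table fills as
    # [[[]], [["a"]], [["aa"], ["a", "a"]]], i.e. two distinct constructions.
    assert sorted(all_construct("aa", ["a", "aa"])) == [["a", "a"], ["aa"]]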
| 381 | 0 |
import math


def is_prime(number: int) -> bool:
    """Checks if a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
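

# Added illustration: next_prime scans upward from `factor * value`; starting
# from 14 the scan passes the composites 15 and 16 and stops at 17.
def _check_next_prime_example() -> None:
    assert next_prime(14) == 17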
| 711 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."}
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed `Trainer` that decays the gumbel softmax temperature after every training step.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir, )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names)

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names, )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'")

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, )
    trainer.train()
if __name__ == "__main__":
main()
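
# Illustrative launch command (added example; the model and dataset names are
# placeholders, not taken from the original script):
#   python run_pretrain.py --model_name_or_path="patrickvonplaten/wav2vec2-base-v2" \
#       --dataset_name="librispeech_asr" --dataset_config_name="clean" \
#       --output_dir="./wav2vec2-pretrained" --do_train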
| 601 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
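
# Added usage sketch (illustrative; assumes a local SentencePiece model file):
# tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
# ids = [tokenizer._convert_token_to_id(t) for t in tokenizer._tokenize("Hello world")]
# ids_with_special = tokenizer.build_inputs_with_special_tokens(ids)  # [CLS] ... [SEP]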
| 384 |
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[int], iterations: int) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
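
    # Added worked example (illustrative, not part of the original tests): the
    # matrix below is strictly diagonally dominant, so the iteration converges.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))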
| 384 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 711 |
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)


outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
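
# Added sanity check (illustrative, not part of the original script): after the
# in-place sort, the array should be non-decreasing.
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1))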
| 350 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 146 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
                a = {k: [a[k]] for k in a}
if len(a['start_token'] ) > 0:
break
return a
    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)
# disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
raise ValueError('Issue in ID' , example['id'] )
return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
        doc = example["document"]["tokens"]
        context = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
return {
"context": " ".join(snake_case__ ),
"answer": {
"start_token": -1_00, # ignore index in cross-entropy
"end_token": -1_00, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
    new = " ".join(context[start_token:end_token])
# checking above code
if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
if new != old:
print('ID:' , example['id'] )
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
return {
"context": " ".join(snake_case__ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }
    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False, ).input_ids)
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids)

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")
    if len(input_ids) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion, )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"], ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
'input_ids': ids,
'start_token': start,
'end_token': end,
'category': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
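
    # Added usage note (illustrative, not part of the original script): the split is
    # selected through the environment, e.g.
    #   PROCESS_TRAIN=true python prepare_natural_questions.py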
| 146 | 1 |
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)
        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        # Use the segmentation model to turn the text query into an inpainting mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
| 714 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
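
# Added usage sketch (illustrative; a "video" is simply a list of frames, and the
# class name above is a reconstruction):
# processor = VideoMAEImageProcessor()
# pixel_values = processor.preprocess(list_of_pil_frames, return_tensors="pt")["pixel_values"]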
| 19 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
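
# Illustrative invocation (added example; the exact flags come from
# TensorFlowBenchmarkArguments and may differ across versions):
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128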
| 641 | 0 |
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head mapping embeddings to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
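

# Added usage sketch (illustrative shapes, not part of the original module):
# import torch
# head = ClassificationHead(class_size=5, embed_size=768)
# logits = head(torch.randn(2, 768))  # -> torch.Size([2, 5])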
| 703 |
from random import randint, random
def construct_highway(number_of_cells: int, frequency: int, initial_speed: int, random_frequency: bool = False, random_speed: bool = False, max_speed: int = 5) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)
def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
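
    # Added illustrative run (not part of the original tests): five cars on a
    # 30-cell loop, simulated for 2 steps with a 10% chance of slowing down.
    print(simulate(construct_highway(30, 6, 3), 2, 0.1, 5))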
| 669 | 0 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
buffer.clear()
continue
else:
                line = line.strip()
                buffer.append(line)
if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")
return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
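
# Illustrative invocation (added example; the run id and token are placeholders):
#   python extract_warnings.py --workflow_run_id 12345 --output_dir ./artifacts --token <GH_TOKEN>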
| 4 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
snake_case__ = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
snake_case__ = field(
default=UpperCamelCase__ , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
snake_case__ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    """Fine-tunes and evaluates a token classification model (NER/POS/chunking)."""
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
lowerCAmelCase__ = import_module("tasks" )
try:
lowerCAmelCase__ = getattr(lowerCAmelCase_ , model_args.task_type )
lowerCAmelCase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase_ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size , seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list , out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_process_zero():
            with open(output_eval_file , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , lowerCAmelCase_ , lowerCAmelCase_ )
writer.write("%s = %s\n" % (key, value) )
results.update(lowerCAmelCase_ )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions , label_ids , metrics = trainer.predict(test_dataset )
        preds_list , _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , lowerCAmelCase_ , lowerCAmelCase_ )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return results
def _mp_fn(index ):
    """Entry point for xla_spawn (TPUs): each spawned process simply runs main()."""
    main()
if __name__ == "__main__":
main()
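
# Illustrative command line for this script (hedged: the file name, data path and
# model are placeholders; any CoNLL-2003-formatted directory works):
#
#   python run_ner.py \
#       --model_name_or_path bert-base-cased \
#       --data_dir ./conll2003 \
#       --output_dir ./ner_out \
#       --do_train --do_eval --do_predict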
| 61 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig ):
    model_type = 'new-model'


if is_tf_available():

    class TFNewModel(TFBertModel ):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'bert-base-cased'
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForPreTraining.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForCausalLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForMaskedLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
            lowerCamelCase_ = TFAutoModelForSeq2SeqLM.from_pretrained(A_ )
            lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForSeq2SeqLM.from_pretrained(A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForSequenceClassification.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
@require_tensorflow_probability
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(A_ )
lowerCamelCase_ , lowerCamelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
A_ , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModelWithLMHead.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=A_ ) , 14410 )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = copy.deepcopy(model.config )
lowerCamelCase_ = ['FunnelBaseModel']
lowerCamelCase_ = TFAutoModel.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('new-model' , A_ )
lowerCamelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
auto_class.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
auto_class.register(A_ , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ = BertModelTester(self ).get_config()
lowerCamelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCamelCase_ = auto_class.from_config(A_ )
self.assertIsInstance(A_ , A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(A_ )
lowerCamelCase_ = auto_class.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a__ ( self : int ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('bert-base' )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCamelCase_ = TFAutoModel.from_pretrained(A_ , revision='aaaaaa' )
def a__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(A_ , 'Use `from_pt=True` to load this model' ):
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowerCamelCase_ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 714 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling(TaskTemplate ):
    task: str = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"

    @property
    def column_mapping(self ) -> Dict[str, str]:
        return {self.text_column: "text"}
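
# Usage sketch (hedged: assumes the surrounding `datasets` task API). Since the
# dataclass is frozen, a template for a corpus whose text lives in another column
# could be derived with dataclasses.replace, e.g.
#   template = dataclasses.replace(LanguageModeling(), text_column='content')
# after which column_mapping resolves to {'content': 'text'}.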
| 651 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params , i , prefix ):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup(params , i , prefix , layer_name="attention" ):
    """Returns the KOQV attention kernels of a layer, with the head axes merged."""
A__ = A__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
A__ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
A__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
A__ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
A__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
A__ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
A__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
A__ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
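
# Shape note (inferred from the reshapes above, not from T5X docs): after selecting
# layer i, the q/k/v kernels still carry separate head and head-dim axes, which are
# merged into one 2-D matrix per projection (for `out`, the two leading axes are
# merged instead); the caller then transposes these to match PyTorch nn.Linear weights.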
def tax_mlp_lookup(params , i , prefix , split_mlp_wi=False ):
    """Returns the MLP kernels (wi, wo) of a layer; wi is a pair when the MLP is gated."""
if split_mlp_wi:
A__ = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
A__ = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
A__ = (wi_a, wi_a)
else:
A__ = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
A__ = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
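
# Background (hedged summary of T5 v1.1-style gated MLPs): when wi is split, the
# block computes roughly  h = act(x @ wi_0) * (x @ wi_1);  y = h @ wo,
# i.e. one projection feeds the nonlinearity and the other acts as a linear gate;
# older v1.0 checkpoints use a single wi followed by the activation.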
def tax_layer_norm_lookup(params , i , prefix , layer_name ):
    """Returns the layer norm scale of a layer."""
return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def convert_tax_to_pytorch(variables , * , num_layers , is_encoder_only , scalable_attention=False ):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch naming."""
A__ = traverse_util.flatten_dict(variables["target"] )
A__ = {"/".join(_A ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
A__ = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , _A )
A__ = collections.OrderedDict()
# Shared embeddings.
A__ = old["token_embedder/embedding"]
# Encoder.
for i in range(_A ):
# Block i, layer 0 (Self Attention).
A__ = tax_layer_norm_lookup(_A , _A , "encoder" , "pre_attention_layer_norm" )
A__ , A__ , A__ , A__ = tax_attention_lookup(_A , _A , "encoder" , "attention" )
A__ = layer_norm
A__ = k.T
A__ = o.T
A__ = q.T
A__ = v.T
# Block i, layer 1 (MLP).
A__ = tax_layer_norm_lookup(_A , _A , "encoder" , "pre_mlp_layer_norm" )
A__ , A__ = tax_mlp_lookup(_A , _A , "encoder" , _A )
A__ = layer_norm
if split_mlp_wi:
A__ = wi[0].T
A__ = wi[1].T
else:
A__ = wi.T
A__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
A__ = tax_relpos_bias_lookup(
_A , _A , "encoder" ).T
A__ = old["encoder/encoder_norm/scale"]
if not scalable_attention:
A__ = tax_relpos_bias_lookup(
_A , 0 , "encoder" ).T
A__ = tax_relpos_bias_lookup(
_A , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(_A ):
# Block i, layer 0 (Self Attention).
A__ = tax_layer_norm_lookup(_A , _A , "decoder" , "pre_self_attention_layer_norm" )
A__ , A__ , A__ , A__ = tax_attention_lookup(_A , _A , "decoder" , "self_attention" )
A__ = layer_norm
A__ = k.T
A__ = o.T
A__ = q.T
A__ = v.T
# Block i, layer 1 (Cross Attention).
A__ = tax_layer_norm_lookup(_A , _A , "decoder" , "pre_cross_attention_layer_norm" )
A__ , A__ , A__ , A__ = tax_attention_lookup(_A , _A , "decoder" , "encoder_decoder_attention" )
A__ = layer_norm
A__ = k.T
A__ = o.T
A__ = q.T
A__ = v.T
# Block i, layer 2 (MLP).
A__ = tax_layer_norm_lookup(_A , _A , "decoder" , "pre_mlp_layer_norm" )
A__ , A__ = tax_mlp_lookup(_A , _A , "decoder" , _A )
A__ = layer_norm
if split_mlp_wi:
A__ = wi[0].T
A__ = wi[1].T
else:
A__ = wi.T
A__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
A__ = tax_relpos_bias_lookup(_A , _A , "decoder" ).T
A__ = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
A__ = old["decoder/logits_dense/kernel"].T
return new
def make_state_dict(converted_params , is_encoder_only ):
    """Prepares a state dict for the PyTorch model."""
A__ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
A__ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
A__ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
A__ = state_dict["shared.weight"]
return state_dict
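
# Background (hedged): v1.0-style checkpoints tie the LM head to the shared token
# embeddings, which is why `shared.weight` is copied into the missing slots above;
# v1.1-style checkpoints ship a separate `decoder/logits_dense` kernel instead, so
# the lm_head branch is skipped for them.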
def load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """Replaces the params in the model with the converted T5X params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only=False , scalable_attention=False ):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
print("Done" )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
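
# Illustrative command line (hedged: the script name and all paths are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./converted_model \
#       --scalable_attention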
| 491 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel ):
    def __init__(self , config , proj_size=768 ):
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def forward(self , pixel_values , return_uncond_vector=False ):
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
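
# Note (inferred from the code, not from external docs): `uncond_vector` is a learned
# embedding returned next to the projected image features so a caller can run
# classifier-free guidance, contrasting the example-image conditioning with a fixed
# unconditional embedding of the same shape.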
class PaintByExampleMapper(nn.Module ):
    def __init__(self , config ):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn="gelu" , attention_bias=True )
                for _ in range(num_layers )
            ] )

    def forward(self , hidden_states ):
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
| 491 | 1 |
'''simple docstring'''
from collections import deque
def tarjan(g ):
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph, given as an adjacency list g.
    """
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]

    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index

    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
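
# Standard properties of the implementation above: it runs in O(V + E), every vertex
# is pushed to and popped from the stack at most once, and the strongly connected
# components are emitted in reverse topological order of the condensation graph.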
def create_graph(n , edges ):
    """Builds an adjacency list with n vertices from a list of (u, v) edges."""
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 377 |
'''simple docstring'''

Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1 , end_point2 ):
    """Creates the 3-d vector pointing from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab , ac ):
    """Returns the cross product of vectors ab and ac."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector , accuracy ):
    """True when every component of the vector rounds to zero."""
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)


def are_collinear(a , b , c , accuracy = 10 ):
    """True when the three points lie on one line, i.e. the cross product of ab and ac vanishes."""
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_3d_vectors_cross(ab , ac ) , accuracy )
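
# Worked example for the functions above: with a = (0, 0, 0), b = (1, 2, 3) and
# c = (2, 4, 6), ab = (1, 2, 3) and ac = (2, 4, 6); their cross product is
# (2*6 - 3*4, -(1*6 - 3*2), 1*4 - 2*2) = (0, 0, 0), so are_collinear(a, b, c) is
# True. Nudging c to (2, 4, 7) makes the cross product non-zero and the result False.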
| 377 | 1 |
import numpy as np
import qiskit
def bb84(key_len = 8 , seed = None ):
    """
    Performs a BB84 key-exchange simulation and returns the generated key.
    key_len: length of the generated key in bits; seed: RNG seed (mostly for testing).
    """
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits , name='BB84' )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state ):
        if alice_state[index] == 1:
            bb84_circ.x(index )
        if alice_basis[index] == 1:
            bb84_circ.h(index )
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bb84_circ.h(index )
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('aer_simulator' )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , '0' )
    return key
if __name__ == "__main__":
    print(F'The generated key is : {bb84(8, seed=0)}')
from doctest import testmod
testmod()
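# Sifting note (a property of the protocol, not of this script): each of Bob's basis
# choices matches Alice's with probability 1/2, so about half of the 6 * key_len raw
# bits survive the basis comparison in expectation; a full BB84 run would sacrifice
# further bits for eavesdropper detection, which is why the oversampling is generous.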
| 462 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self ):
return 32
@property
    def time_input_dim(self ):
return 32
@property
    def block_out_channels_a(self ):
return self.time_input_dim
@property
    def time_embed_dim(self ):
return self.time_input_dim * 4
@property
    def cross_attention_dim(self ):
return 1_00
@property
    def dummy_unet(self ):
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs(self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self ):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs(self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint(self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = 'a hat'
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
| 428 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self ):
super().setUp()
# fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
    def test_check_encoding_slow_fast(self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text )
                text_tokenized_r = tokenizer_r.tokenize(text )
                self.assertListEqual(text_tokenized_s , text_tokenized_r )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text )
                text_tokenized_r = tokenizer_r.tokenize(text )
                self.assertListEqual(text_tokenized_s , text_tokenized_r )
# Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s , tokens_r )
    def test_offsets_mapping_with_different_add_prefix_space_argument(self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = f' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
    def test_log_warning(self ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError ) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
    def test_tokenization_python_rust_equals(self ):
        super().test_tokenization_python_rust_equals()
def lowerCamelCase__ ( self : Any ) -> Tuple:
# CLIP always lower cases letters
        pass
| 266 |
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size , sigma ):
    """Samples a k_size x k_size Gaussian kernel centred on the window."""
    center = k_size // 2
    x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
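
# Caveat (hedged): sampling the continuous Gaussian on a discrete grid does not make
# the kernel sum to exactly 1 (and the prefactor above uses 1/(2*pi*sigma) where the
# textbook 2-D density has 1/(2*pi*sigma**2)), so absolute brightness is only
# approximate; dividing g by g.sum() would renormalize the kernel.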
def gaussian_filter(image , k_size , sigma ):
    """Applies a Gaussian blur to a grayscale image via an im2col matrix product."""
    height , width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
    return dst
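
# Design note: this is the classic im2col formulation - every k_size x k_size window
# becomes one row of image_array, so the whole convolution collapses into a single
# matrix-vector product with the flattened kernel. With no padding, the output
# shrinks to (height - k_size + 1, width - k_size + 1).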
if __name__ == "__main__":
# read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 266 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC ):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self ):
        # test for the above condition
        self.test()
    def test(self ):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
            stepped , completed , reset = self.update(advance )
            counter += 1
            if counter > 10000:
                raise Exception('update() does not fulfill the constraint.' )
        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.' )
    @abstractmethod
    def advance(self ):
        """Returns the token(s) that would advance this constraint by one step."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def does_advance(self , token_id ):
        """Reads in a token and returns whether it makes progress."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def update(self , token_id ):
        """Steps the constraint with a generated token; returns (stepped, completed, reset)."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def reset(self ):
        """Resets the progress of this constraint."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def remaining(self ):
        """Returns the number of remaining steps needed to complete this constraint."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def copy(self , stateful=False ):
        """Creates a new instance of this constraint, optionally copying its progress."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint ):
    def __init__(self , token_ids ):
        super(Constraint , self ).__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False
    def advance(self ):
        """The next token id that makes progress through the phrase."""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
    def does_advance(self , token_id ):
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]
    def update(self , token_id ):
        if not isinstance(token_id , int ):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset
    def reset(self ):
        self.completed = False
        self.fulfilled_idx = 0
    def remaining(self ):
        """Number of steps left to fulfil the phrase."""
return self.seqlen - (self.fulfilled_idx + 1)
    def copy(self , stateful=False ):
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
return new_constraint
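
    # Usage sketch (hedged; the token ids are made up): a PhrasalConstraint over
    # [5, 10, 42] forces that exact subsequence during constrained generation:
    #   c = PhrasalConstraint([5, 10, 42])
    #   c.advance()   # -> 5, the next id that makes progress
    #   c.update(5)   # -> (stepped=True, completed=False, reset=False)
    #   c.update(99)  # wrong id -> progress is reset to the start of the phrase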
class DisjunctiveTrie:
    def __init__(self , nested_token_ids , no_subsets=True ):
        """Builds a trie over the token id sequences in `nested_token_ids`."""
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                F''' {nested_token_ids}.''' )
        self.trie = root
    def next_tokens(self , current_seq ):
        """The next possible tokens that will progress the trie, given the current sequence."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens
    def reached_leaf(self , current_seq ):
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0
    def count_leaves(self , root ):
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
    def has_subsets(self , trie , nested_token_ids ):
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
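
    # How subset detection works (inferred from the code): every complete sequence
    # should terminate at its own leaf, so a well-formed trie over N sequences has
    # exactly N leaves; if count_leaves reports fewer, some sequence is a strict
    # prefix of another and has_subsets returns True.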
class DisjunctiveConstraint(Constraint ):
    def __init__(self , nested_token_ids ):
        super(Constraint , self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance(self ):
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowercase : Union[str, Any] =self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowercase : Any =False
lowercase : Any =False
lowercase : Optional[Any] =False
if self.does_advance(UpperCAmelCase__ ):
self.current_seq.append(UpperCAmelCase__ )
lowercase : Dict =True
else:
lowercase : Tuple =True
self.reset()
lowercase : Union[str, Any] =self.trie.reached_leaf(self.current_seq )
lowercase : int =completed
return stepped, completed, reset
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : int =False
lowercase : Tuple =[]
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Any=False ):
'''simple docstring'''
lowercase : Dict =DisjunctiveConstraint(self.token_ids )
if stateful:
lowercase : Union[str, Any] =self.seqlen
lowercase : int =self.current_seq
lowercase : Dict =self.completed
return new_constraint
class ConstraintListState:
    def __init__(self, constraints: List[Constraint]):
        '''simple docstring'''
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()
    def init_state(self):
        '''simple docstring'''
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]
    def get_bank(self):
        '''simple docstring'''
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add
    def advance(self):
        '''simple docstring'''
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list
    def reset(self, token_ids: Optional[List[int]]):
        '''simple docstring'''
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add(self, token_id: int):
        '''simple docstring'''
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''')
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''')
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy(self, stateful=True):
        '''simple docstring'''
        new_state = ConstraintListState(self.constraints)  # we never actually touch the `self.constraints` objects
        # throughout this process, so they remain in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
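# Editorial usage sketch (not part of the original file): assuming the classes above
# mirror `transformers.generation.beam_constraints`, a constraint state can be driven
# token by token. The token ids below are arbitrary illustrative values.
from transformers.generation.beam_constraints import ConstraintListState, PhrasalConstraint

state = ConstraintListState([PhrasalConstraint([5, 9, 1])])
for token_id in [5, 9, 1]:
    complete, stepped = state.add(token_id)  # completes or steps exactly one constraint
assert state.completed  # the full phrase [5, 9, 1] was produced, so every constraint is met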
| 92 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
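# Editorial sanity check: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29
# (the worked example from the Project Euler problem 3 statement).
assert solution(13195) == 29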
| 92 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = '''nat'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
 | 113 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nezha'] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """simple docstring"""
        raise NotImplementedError()
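# Editorial usage sketch: how a concrete command would plug into the abstract CLI
# interface above. `EchoCommand` and its argument are hypothetical, not part of
# transformers; only `ArgumentParser` (imported above) is real.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser('echo')
        echo_parser.add_argument('text')
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)


root_parser = ArgumentParser()
EchoCommand.register_subcommand(root_parser.add_subparsers())
args = root_parser.parse_args(['echo', 'hello'])
args.func(args).run()  # prints "hello"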
| 203 |
'''simple docstring'''
def triangle_number_generator():
    for n in range(1 , 1000000 ):
        yield n * (n + 1) // 2
def count_divisors(n: int) -> int:
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500 )
if __name__ == "__main__":
print(solution())
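# Editorial sanity check: 28 = 1 + 2 + ... + 7 is the first triangle number with more
# than five divisors (1, 2, 4, 7, 14, 28), per the Project Euler problem 12 statement.
assert count_divisors(28) == 6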
| 5 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None, ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(''','''):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        '''simple docstring'''
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.')
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype, )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == '''mps'''
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
 | 479 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''')
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        '''simple docstring'''
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        '''simple docstring'''
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__(self) -> str:
        '''simple docstring'''
        return self.__str__()
    def derivative(self) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: int | float = 0) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a: object) -> bool:
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_a: object) -> bool:
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
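# Editorial usage example for the Polynomial class above:
p = Polynomial(1, [1, 2])      # represents 1 + 2x
q = Polynomial(2, [0, 0, 3])   # represents 3x^2
print(p + q)                   # -> 3x^2 + 2x + 1
print((p * q).evaluate(2))     # (1 + 2x) * 3x^2 evaluated at x = 2 -> 60
print(q.derivative())          # -> 6x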
| 479 | 1 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''Salesforce/codegen-350M-mono''': (
            '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''Salesforce/codegen-350M-mono''': 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        if kwargs.pop("""add_bos_token""", False):
            model_id = kwargs.pop("""name_or_path""", """""")
            raise ValueError(
                """Currently GPT2's fast tokenizer does NOT support adding a BOS token. """
                """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
                F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
                F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
                """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"""
                """ so that the fast tokenizer works correctly.""")
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("""add_prefix_space""", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("""type"""))
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs):
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""", False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs):
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""", False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix = None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(self, token_ids, skip_special_tokens = False, clean_up_tokenization_spaces = None, truncate_before_pattern = None, **kwargs, ):
        """simple docstring"""
        decoded_text = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        """simple docstring"""
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1
        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("""^print""", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("""^def""", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
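# Editorial usage sketch, assuming the class above matches transformers'
# CodeGenTokenizerFast: `truncate_before_pattern` cuts a decoded completion at the
# first match of any given regex (the patterns below come from the CodeGen docs).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
input_ids = tokenizer("def hello_world():")["input_ids"]
text = tokenizer.decode(input_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])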
| 5 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """simple docstring"""

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    """simple docstring"""

    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
a = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 493 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        '''simple docstring'''
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        '''simple docstring'''
        # make sure that iterating over schedulers with the same config names gives identical results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        '''simple docstring'''
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ['logrho']:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ['epsilon', 'sample']:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type='deis', solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self):
        '''simple docstring'''
        for prediction_type in ['epsilon', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_orders_and_types(self):
        '''simple docstring'''
        for algorithm_type in ['deis']:
            for solver_type in ['logrho']:
                for order in [1, 2, 3]:
                    for prediction_type in ['epsilon', 'sample']:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        '''simple docstring'''
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        '''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_full_loop_with_v_prediction(self):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
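# Editorial usage sketch for the scheduler under test, assuming the public diffusers
# API: DEIS is a multistep solver, so a small number of inference steps suffices.
# The zero tensor below is a placeholder for a real UNet noise prediction.
import torch
from diffusers import DEISMultistepScheduler

sched = DEISMultistepScheduler(num_train_timesteps=1000)
sched.set_timesteps(num_inference_steps=25)
sample = torch.randn(1, 3, 8, 8)
for t in sched.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder model prediction
    sample = sched.step(model_output, t, sample).prev_sample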
 | 237 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/tapas-base-finetuned-sqa''': (
        '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
    ),
    '''google/tapas-base-finetuned-wtq''': (
        '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
    ),
    '''google/tapas-base-finetuned-wikisql-supervised''': (
        '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
    ),
    '''google/tapas-base-finetuned-tabfact''': (
        '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
    ),
}
class TapasConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "tapas"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 237 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    """simple docstring"""
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}", ):
            scheduler.set_timesteps(timesteps=timesteps)
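# Editorial usage sketch, assuming the public diffusers API exercised above: a bare
# DDPM ancestral-sampling loop with a placeholder model prediction.
import torch
from diffusers import DDPMScheduler

sched = DDPMScheduler(num_train_timesteps=1000)
sched.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in sched.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder for a UNet epsilon prediction
    sample = sched.step(model_output, t, sample, generator=generator).prev_sample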
| 508 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
lowercase__ = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowercase__ = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowercase__ = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowercase__ = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowercase__ = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowercase__ = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowercase__ = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowercase__ = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowercase__ = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowercase__ = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowercase__ = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowercase__ = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowercase__ = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowercase__ = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowercase__ = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 508 | 1 |
"""simple docstring"""
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 482 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
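# For example, floats_list((2, 3)) returns a 2x3 nested list of random floats in [0.0, scale).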
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feature_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 482 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
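# For example (illustrative input): rename_keys("transformer.layers.0.linear1.weight")
# returns "model.decoder.layers.0.fc1.weight".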
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
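# For example, decoder_config_from_checkpoint("small") yields hidden_size=1024,
# ffn_dim=4096, num_hidden_layers=24 and num_attention_heads=16.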
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 375 | 0 |
def factorial(num: int) -> int:
    """Return the factorial of num."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
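# Worked example (verified by hand): factorial(10) == 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) returns 27.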
if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 264 |
def solution(max_perimeter: int = 10**9) -> int:
    """
    Returns the sum of the perimeters of all almost-equilateral Heronian
    triangles (integer sides a, a, a±1 with integer area) whose perimeter does
    not exceed max_perimeter -- Project Euler problem 94.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
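# Sanity check on the recurrence: the first perimeters generated are 16
# (triangle 5-5-6) and 50 (triangle 17-17-16), the two smallest
# almost-equilateral Heronian triangles.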
if __name__ == "__main__":
print(f"""{solution() = }""") | 264 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 49 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 0 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
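# Quick round-trip check (values computed by hand for this sketch):
#   encrypt_message("HELLO", "ATTACKATDAWN") -> "HXELQREEOODR"
#   decrypt_message("HELLO", "HXELQREEOODR") -> "ATTACKATDAWN"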
| 661 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 661 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid, applied element-wise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
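# Example (values rounded): sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))
# -> array([-0.26894142, 0.73105858, 1.76159416])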
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 67 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which would fail the CI.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 264 | 0 |
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
UpperCamelCase_ : str = {"""input_ids""": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        expected_encoding = UpperCamelCase_  # the expected-encoding dict defined on the line above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 635 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 635 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


# The class name below is an assumption: the defaults (OPENAI_CLIP_MEAN/STD,
# do_convert_rgb) match the CLIP-style image processor this module mirrors.
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(snake_case , size=(size['height'], size['width']) , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ):
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = size if size is not None else self.size
lowercase = get_size_dict(snake_case , param_name='size' , default_to_square=snake_case )
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(snake_case , param_name='crop_size' , default_to_square=snake_case )
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase = [convert_to_rgb(snake_case ) for image in images]
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(snake_case ) for image in images]
if do_resize:
lowercase = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(image=snake_case , size=snake_case ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
lowercase = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
lowercase = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
lowercase = {"pixel_values": images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
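# Minimal usage sketch, added for illustration; not part of the original file.
# It assumes numpy and the vision dependencies are installed, and runs only
# when the module is executed directly.
if __name__ == "__main__":
    processor = CLIPImageProcessor()
    dummy = (np.random.rand(300, 400, 3) * 255).astype(np.uint8)  # HWC uint8 image
    batch = processor.preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the defaults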
| 704 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: maximum value for ``n`` items with values
    ``vl`` and weights ``wt`` in a knapsack of capacity ``w``.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # Sort items by value-to-weight ratio, best first; this greedy order is
    # optimal for the fractional variant of the problem.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # prefix sums of weights in greedy order
    k = bisect(acc, w)  # number of items that fit whole
    if w <= 0:
        return 0
    if k == n:  # every item fits whole
        return sum(vl)
    # Take k whole items, then a fraction of item k. This also covers the
    # case where not even the first item fits whole (k == 0).
    taken = acc[k - 1] if k else 0
    return sum(vl[:k]) + (w - taken) * vl[k] / wt[k]
if __name__ == "__main__":
import doctest
doctest.testmod()
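# Added illustration (not in the original file): reproduce the doctest result
# with explicit intermediate values so the greedy arithmetic is visible.
if __name__ == "__main__":
    vals, wts, cap = [60, 100, 120], [10, 20, 30], 50
    # Ratios are 6.0, 5.0 and 4.0, so the greedy order keeps the input order.
    # Items 0 and 1 fit whole (weight 30); 20 of item 2's 30 units are taken.
    expected = 60 + 100 + 20 * 120 / 30  # 240.0
    assert frac_knapsack(vals, wts, cap, 3) == expected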
| 565 | 0 |