code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1
import re


def split_input(str_: str) -> list:
    """Split the input on every non-alphanumeric character, then on whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Capitalise each word and join them with no separator (PascalCase)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of ``text`` with ``separator``, fully upper- or lower-cased."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """Convert the given string to PascalCase."""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """Convert the given string to camelCase."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """Convert the given string to snake_case (upper-cased if ``upper``)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """Convert the given string to kebab-case (upper-cased if ``upper``)."""
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('doctest').testmod()
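
# --- Usage sketch (editor's addition, not in the original snippet) ----------
# The helper names above were restored from their call sites; the four public
# wrapper names are inferred from behaviour. Expected results under that
# assumption:
if __name__ == "__main__":
    assert to_pascal_case("one two 31235three4four") == "OneTwo31235three4four"
    assert to_camel_case("one two 31235three4four") == "oneTwo31235three4four"
    assert to_snake_case("one two 31235three4four", upper=False) == "one_two_31235three4four"
    assert to_kebab_case("one two 31235three4four", upper=True) == "ONE-TWO-31235THREE4FOUR"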
| 43 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up memory between tests.
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids,
            processed_masked_images,
            processed_masks,
            params,
            prng_seed,
            num_inference_steps,
            jit=True,
        )

        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
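
# Editor's note (addition): with `jit=True` the Flax pipeline pmaps its
# sampling loop across all local devices, which is why `params` is
# replicate()d, the PRNG key is split per device, and every input is
# shard()ed before the call; the per-device outputs are then reshaped back
# into a single (num_samples, 512, 512, 3) batch.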
| 43 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
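
# Usage sketch (editor's addition): with the lazy structure above, importing
# the package is cheap; the heavy submodule (and hence torch/TF) is only
# loaded on first attribute access. A hypothetical session, assuming
# `transformers` is installed:
#
#     from transformers.models.convbert import ConvBertConfig  # no torch yet
#     config = ConvBertConfig()  # attribute access triggers the real import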
| 706 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}

class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
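
# Usage sketch (editor's addition; class names follow the de-obfuscation
# above): build a config and inspect the dynamic ONNX axes.
#
#     config = RobertaPreLayerNormConfig(num_hidden_layers=6)
#     onnx_config = RobertaPreLayerNormOnnxConfig(config, task="multiple-choice")
#     print(onnx_config.inputs)  # OrderedDict with batch/choice/sequence axes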
| 325 | 0 |
def is_balanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is closed in the right order."""
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
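
# Quick check (editor's addition), doctest-style:
#     >>> is_balanced("{[()]}")
#     True
#     >>> is_balanced("{[(])}")
#     False
#     >>> is_balanced("((")
#     False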

def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 6 |
"""A platform-independent file lock that supports the with-statement."""

import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None

def logger():
    """Return the module logger, creating it lazily on first use."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp

class _Acquire_ReturnProxy:
    """Context-manager proxy returned by ``BaseFileLock.acquire()``."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None

class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent lock acquisition; implemented by subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent lock release; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path

class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None

class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None

class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None

FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
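
# Usage sketch (editor's addition): the canonical pattern with the classes
# above. `FileLock` resolves to the platform-appropriate implementation.
#
#     lock = FileLock("resource.txt.lock", timeout=5)
#     with lock:                      # raises Timeout after 5 s if contended
#         with open("resource.txt", "a") as f:
#             f.write("exclusive write\n")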
| 173 | 0 |
"""A simple genetic algorithm that evolves a random population toward a target string."""

from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))

def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score ``item`` by how many positions match ``main_target``."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of ``child``."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # NOTE: the mutated index was lost in the dump; this restores the usual
        # "replace one random position" formulation.
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed and mutate children of ``parent_1``, proportionally to its score."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop

def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until ``target`` is reproduced exactly."""
    # Verify that N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
UpperCAmelCase_ : Union[str, Any] = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
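
# Usage sketch (editor's addition): calling `basic` directly with a short
# target and a minimal gene pool; the return values follow the unpacking
# above. May take a moment because of the random search.
#
#     generation, total_population, best = basic("hello", list("helo "), debug=False)
#     assert best == "hello"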
| 704 |
"""Tokenization classes for RemBERT."""

import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
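
# Usage sketch (editor's addition; requires the `sentencepiece` package and a
# real model file — the path below is a placeholder):
#
#     tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
#     ids = tokenizer("Hello world")["input_ids"]  # [CLS] ... [SEP] wrapped
#     tokenizer.save_vocabulary("./saved")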
| 424 | 0 |
"""PEGASUS model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
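
# Usage sketch (editor's addition): the attribute_map above lets generic code
# read `hidden_size` / `num_attention_heads` from an encoder-decoder config.
#
#     config = PegasusConfig(encoder_layers=2, decoder_layers=2)
#     assert config.hidden_size == config.d_model == 1024
#     assert config.num_attention_heads == config.encoder_attention_heads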
| 649 |
"""Fine-tuning a Transformers model for image classification."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")

@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )

@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def __a ( A__ ) -> Any:
lowerCAmelCase = torch.stack([example["pixel_values"] for example in examples] )
lowerCAmelCase = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __a ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , A__ , A__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(A__ )
transformers.utils.logging.set_verbosity(A__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCAmelCase = {}
if data_args.train_dir is not None:
lowerCAmelCase = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
lowerCAmelCase = os.path.join(data_args.validation_dir , "**" )
lowerCAmelCase = load_dataset(
"imagefolder" , data_files=A__ , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCAmelCase = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , A__ ) and data_args.train_val_split > 0.0:
lowerCAmelCase = dataset["train"].train_test_split(data_args.train_val_split )
lowerCAmelCase = split["train"]
lowerCAmelCase = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowerCAmelCase = dataset["train"].features["labels"].names
lowerCAmelCase , lowerCAmelCase = {}, {}
for i, label in enumerate(A__ ):
lowerCAmelCase = str(A__ )
lowerCAmelCase = label
# Load the accuracy metric from the datasets package
lowerCAmelCase = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(A__ ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel=A__ , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowerCAmelCase = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowerCAmelCase = image_processor.size["shortest_edge"]
else:
lowerCAmelCase = (image_processor.size["height"], image_processor.size["width"])
lowerCAmelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowerCAmelCase = Compose(
[
RandomResizedCrop(A__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowerCAmelCase = Compose(
[
Resize(A__ ),
CenterCrop(A__ ),
ToTensor(),
normalize,
] )
def train_transforms(A__ ):
lowerCAmelCase = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(A__ ):
lowerCAmelCase = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
lowerCAmelCase = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(A__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
lowerCAmelCase = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(A__ )
# Initalize our trainer
lowerCAmelCase = Trainer(
model=A__ , args=A__ , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=A__ , tokenizer=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase = last_checkpoint
lowerCAmelCase = trainer.train(resume_from_checkpoint=A__ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCAmelCase = trainer.evaluate()
trainer.log_metrics("eval" , A__ )
trainer.save_metrics("eval" , A__ )
# Write model card and (optionally) push to hub
lowerCAmelCase = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**A__ )
else:
trainer.create_model_card(**A__ )
if __name__ == "__main__":
main()
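
# Usage sketch (editor's addition): a typical invocation, assuming the script
# is saved as run_image_classification.py (dataset and model names below are
# illustrative, not prescribed by the source):
#
#     python run_image_classification.py \
#         --model_name_or_path google/vit-base-patch16-224-in21k \
#         --dataset_name beans \
#         --output_dir ./vit-beans \
#         --do_train --do_eval \
#         --per_device_train_batch_size 8 \
#         --num_train_epochs 3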
| 649 | 1 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session" )
def A__ ( A__ ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_UpperCAmelCase = bytes(UpperCamelCase__ , "utf-8" )
with zstd.open(UpperCamelCase__ , "wb" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture
def A__ ( A__ ) -> List[Any]:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , UpperCamelCase__ ) , "w" ) as f:
f.write(UpperCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def A__ ( A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_UpperCAmelCase = input_paths[compression_format]
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = DownloadConfig(cache_dir=UpperCamelCase__ , extract_compressed_file=UpperCamelCase__ )
_UpperCAmelCase = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
with open(UpperCamelCase__ ) as f:
_UpperCAmelCase = f.read()
with open(UpperCamelCase__ ) as f:
_UpperCAmelCase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def A__ ( A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = "custom_cache"
_UpperCAmelCase = "custom_extracted_dir"
_UpperCAmelCase = tmp_path / "custom_extracted_path"
if default_extracted:
_UpperCAmelCase = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , UpperCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(UpperCamelCase__ ) )
_UpperCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_UpperCAmelCase = xz_file
_UpperCAmelCase = (
DownloadConfig(extract_compressed_file=UpperCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=UpperCamelCase__ )
)
_UpperCAmelCase = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
assert Path(UpperCamelCase__ ).parent.parts[-2:] == expected
def A__ ( A__ ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = str(Path(UpperCamelCase__ ).resolve() )
assert cached_path(UpperCamelCase__ ) == text_file
# relative path
_UpperCAmelCase = str(Path(UpperCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(UpperCamelCase__ ) == text_file
def A__ ( A__ ) -> Any:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(UpperCamelCase__ ):
cached_path(UpperCamelCase__ )
# relative path
_UpperCAmelCase = "./__missing_file__.txt"
with pytest.raises(UpperCamelCase__ ):
cached_path(UpperCamelCase__ )
def A__ ( A__ ) -> str:
'''simple docstring'''
_UpperCAmelCase = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(UpperCamelCase__ ) as f:
_UpperCAmelCase = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCamelCase__ )
def A__ ( ) -> int:
'''simple docstring'''
with pytest.raises(UpperCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCamelCase__ )
def A__ ( A__ ) -> str:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(UpperCamelCase__ ):
http_get("https://huggingface.co" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCamelCase__ )
def A__ ( A__ ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(UpperCamelCase__ ):
ftp_get("ftp://huggingface.co" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCamelCase__ )
def A__ ( A__ ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(UpperCamelCase__ ):
fsspec_get("s3://huggingface.co" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 715 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42

@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0",
        features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(),
        task_templates=[], builder_name="builder", config_name="config", version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}], download_checksums={},
        download_size=1337, post_processing_size=442, dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}

@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 579 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def UpperCamelCase( self ):
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
_UpperCAmelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def A__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
_UpperCAmelCase = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
_UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_video()
_UpperCAmelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
_UpperCAmelCase = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) ) | 32 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__ ( ):
__UpperCAmelCase : Union[str, Any] = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
__UpperCAmelCase : Any = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__lowerCamelCase )
DownloadCommand.register_subcommand(__lowerCamelCase )
EnvironmentCommand.register_subcommand(__lowerCamelCase )
RunCommand.register_subcommand(__lowerCamelCase )
ServeCommand.register_subcommand(__lowerCamelCase )
UserCommands.register_subcommand(__lowerCamelCase )
AddNewModelCommand.register_subcommand(__lowerCamelCase )
AddNewModelLikeCommand.register_subcommand(__lowerCamelCase )
LfsCommands.register_subcommand(__lowerCamelCase )
PTtoTFCommand.register_subcommand(__lowerCamelCase )
# Let's go
__UpperCAmelCase : Optional[Any] = parser.parse_args()
if not hasattr(__lowerCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
__UpperCAmelCase : Tuple = args.func(__lowerCamelCase )
service.run()
if __name__ == "__main__":
main()
| 63 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Dict = {"do_clean_text": False, "add_prefix_space": False}
def lowerCamelCase ( self ):
super().setUp()
# fmt: off
UpperCAmelCase__ : Tuple = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
UpperCAmelCase__ : Optional[Any] = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
UpperCAmelCase__ : int = {'''unk_token''': '''<unk>'''}
UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(_UpperCAmelCase ) )
def lowerCamelCase ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ : Dict = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
UpperCAmelCase__ : Any = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.get_input_output_texts(_UpperCAmelCase )
UpperCAmelCase__ : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def lowerCamelCase ( self ):
pass # TODO add if relevant
def lowerCamelCase ( self ):
pass # TODO add if relevant
def lowerCamelCase ( self ):
pass # TODO add if relevant
def lowerCamelCase ( self ):
UpperCAmelCase__ : int = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ : Union[str, Any] = '''こんにちは、世界。 こんばんは、㔺界。'''
UpperCAmelCase__ : List[Any] = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
UpperCAmelCase__ : Tuple = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids without special tokens
UpperCAmelCase__ : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids with special tokens
UpperCAmelCase__ : Optional[int] = tokens + [tokenizer.unk_token]
UpperCAmelCase__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCAmelCase__ : int = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase ( self ):
UpperCAmelCase__ : Tuple = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ : Optional[Any] = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
UpperCAmelCase__ : str = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
UpperCAmelCase__ : Optional[Any] = tokenizer.encode(_UpperCAmelCase )
UpperCAmelCase__ : Dict = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
UpperCAmelCase__ : Union[str, Any] = '''こんにちは、世界。'''
UpperCAmelCase__ : List[Any] = '''こんばんは、㔺界。😀'''
UpperCAmelCase__ : Tuple = '''こんにちは、世界。こんばんは、世界。😀'''
UpperCAmelCase__ : Dict = tokenizer.encode(prefix_text + input_text )
UpperCAmelCase__ : List[Any] = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
UpperCAmelCase__ : Tuple = tokenizer.encode(_UpperCAmelCase , prefix_text=_UpperCAmelCase )
UpperCAmelCase__ : Tuple = tokenizer.decode(_UpperCAmelCase )
UpperCAmelCase__ : List[str] = tokenizer.decode(_UpperCAmelCase )
UpperCAmelCase__ : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : int = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
UpperCAmelCase__ : int = '''こんにちは、世界。'''
UpperCAmelCase__ : Dict = '''こんばんは、㔺界。😀'''
UpperCAmelCase__ : str = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
UpperCAmelCase__ : Optional[Any] = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
UpperCAmelCase__ : Union[str, Any] = [1] + [0] * (len_prefix + len_text + 1)
UpperCAmelCase__ : str = [1] * (len_prefix + len_text + 1) + [0]
UpperCAmelCase__ : Union[str, Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCAmelCase__ : str = tokenizer(prefix_text + input_text ).token_type_ids
UpperCAmelCase__ : Dict = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
UpperCAmelCase__ : Tuple = tokenizer(_UpperCAmelCase , prefix_text=_UpperCAmelCase ).token_type_ids
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
UpperCAmelCase__ : List[Any] = tokenizer.encode('''あンいワ''' )
UpperCAmelCase__ : Optional[int] = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
UpperCAmelCase__ : Dict = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase ( self ):
UpperCAmelCase__ : int = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
UpperCAmelCase__ : Optional[Any] = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
UpperCAmelCase__ : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase )
UpperCAmelCase__ : Dict = tokenizer.batch_encode_plus(_UpperCAmelCase , padding=_UpperCAmelCase )
# fmt: off
UpperCAmelCase__ : int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
UpperCAmelCase__ : List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCAmelCase__ : Tuple = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , _UpperCAmelCase )
self.assertListEqual(x_token.token_type_ids , _UpperCAmelCase )
self.assertListEqual(x_token.attention_mask , _UpperCAmelCase )
self.assertListEqual(x_token_a.input_ids , _UpperCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , _UpperCAmelCase )
self.assertListEqual(x_token_a.attention_mask , _UpperCAmelCase )
def lowerCamelCase ( self ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase ( self ):
# tokenizer has no padding token
pass | 599 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 599 | 1 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = KandinskyInpaintPipeline
A__ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
A__ = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
A__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def A_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def A_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
return 32
@property
def A_ ( self : Dict ) -> str:
'''simple docstring'''
return self.time_input_dim
@property
def A_ ( self : Dict ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
return 100
@property
def A_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[str] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def A_ ( self : Tuple ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__snake_case : List[Any] = MultilingualCLIP(__a )
__snake_case : List[str] = text_encoder.eval()
return text_encoder
@property
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Tuple = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__snake_case : Any = UNetaDConditionModel(**__a )
return model
@property
def A_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A_ ( self : int ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def A_ ( self : Dict ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = self.dummy_text_encoder
__snake_case : str = self.dummy_tokenizer
__snake_case : Any = self.dummy_unet
__snake_case : Optional[Any] = self.dummy_movq
__snake_case : Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type='epsilon' , thresholding=__a , )
__snake_case : List[str] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A_ ( self : List[str] , __a : Dict , __a : int=0 ) -> Optional[Any]:
'''simple docstring'''
__snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__a ) ).to(__a )
__snake_case : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__a )
# create init_image
__snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
__snake_case : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : List[str] = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((256, 256) )
# create mask
__snake_case : Optional[int] = np.ones((64, 64) , dtype=np.floataa )
__snake_case : List[Any] = 0
if str(__a ).startswith('mps' ):
__snake_case : Any = torch.manual_seed(__a )
else:
__snake_case : Optional[int] = torch.Generator(device=__a ).manual_seed(__a )
__snake_case : int = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def A_ ( self : Any ) -> int:
'''simple docstring'''
__snake_case : List[str] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Optional[int] = self.pipeline_class(**__a )
__snake_case : Union[str, Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) )
__snake_case : Dict = output.images
__snake_case : Union[str, Any] = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
__snake_case : int = image[0, -3:, -3:, -1]
__snake_case : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__snake_case : List[Any] = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def A_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
__snake_case : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__snake_case : Tuple = np.ones((768, 768) , dtype=np.floataa )
__snake_case : Tuple = 0
__snake_case : Tuple = 'a hat'
__snake_case : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__a )
__snake_case : int = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
__snake_case : Optional[Any] = pipeline.to(__a )
pipeline.set_progress_bar_config(disable=__a )
__snake_case : Any = torch.Generator(device='cpu' ).manual_seed(0 )
__snake_case , __snake_case : Union[str, Any] = pipe_prior(
__a , generator=__a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__snake_case : List[str] = pipeline(
__a , image=__a , mask_image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
__snake_case : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__a , __a )
| 286 | 1 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase_ :
def __init__( self : Tuple , _lowercase : Optional[int]=2 , _lowercase : Dict=3 , _lowercase : Optional[Any]=6_4 , _lowercase : List[str]=None ) -> Any:
_lowercase = np.random.default_rng(_lowercase )
_lowercase = length
_lowercase = rng.normal(size=(length,) ).astype(np.floataa )
_lowercase = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Optional[int] ) -> int:
return self.length
def __getitem__( self : Any , _lowercase : str ) -> List[str]:
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase_ ( torch.nn.Module ):
def __init__( self : Optional[int] , _lowercase : List[str]=0 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=False ) -> List[str]:
super().__init__()
_lowercase = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowercase = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowercase = True
def _lowerCamelCase ( self : Optional[int] , _lowercase : List[Any]=None ) -> Tuple:
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
_lowercase = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase_ ( torch.nn.Module ):
def __init__( self : Any , _lowercase : Tuple=0 , _lowercase : Any=0 , _lowercase : Any=False ) -> int:
super().__init__()
_lowercase = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
_lowercase = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
_lowercase = True
def _lowerCamelCase ( self : Any , _lowercase : Optional[int]=None ) -> Dict:
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
_lowercase = False
return x * self.a + self.b
def __UpperCAmelCase ( _snake_case : List[str], _snake_case : int = 1_6 ):
from datasets import load_dataset
from transformers import AutoTokenizer
_lowercase = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowercase = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowercase = load_dataset("csv", data_files=_snake_case )
_lowercase = datasets["train"].unique("label" )
_lowercase = {v: i for i, v in enumerate(_snake_case )}
def tokenize_function(_snake_case : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
_lowercase = tokenizer(
examples["sentence1"], examples["sentence2"], truncation=_snake_case, max_length=_snake_case, padding="max_length" )
if "label" in examples:
_lowercase = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowercase = datasets.map(
_snake_case, batched=_snake_case, remove_columns=["sentence1", "sentence2", "label"], )
def collate_fn(_snake_case : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_snake_case, padding="max_length", max_length=1_2_8, return_tensors="pt" )
return tokenizer.pad(_snake_case, padding="longest", return_tensors="pt" )
# Instantiate dataloaders.
_lowercase = DataLoader(tokenized_datasets["train"], shuffle=_snake_case, collate_fn=_snake_case, batch_size=2 )
_lowercase = DataLoader(tokenized_datasets["validation"], shuffle=_snake_case, collate_fn=_snake_case, batch_size=1 )
return train_dataloader, eval_dataloader | 704 | """simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple , _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : List[str] ) -> int:
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for a, b in zip(_lowercase , _lowercase ):
self.assertAlmostEqual(_lowercase , _lowercase , delta=_lowercase )
def _lowerCamelCase ( self : List[str] ) -> Optional[int]:
_lowercase = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_lowercase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def _lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
_lowercase = None
ops.enable_eager_execution_internal()
_lowercase = tf.config.list_physical_devices("CPU" )
if len(_lowercase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
_lowercase = tf.config.list_logical_devices(device_type="CPU" )
_lowercase = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
_lowercase = GradientAccumulator()
_lowercase = tf.Variable([4.0, 3.0] )
_lowercase , _lowercase = create_optimizer(5e-5 , 1_0 , 5 )
_lowercase = tf.Variable([0.0, 0.0] , trainable=_lowercase )
def accumulate_on_replica(_lowercase : Dict ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_lowercase : Optional[Any] , _lowercase : List[Any] ):
with strategy.scope():
_lowercase = strategy.experimental_local_results(_lowercase )
local_variables[0].assign(_lowercase )
local_variables[1].assign(_lowercase )
strategy.run(_lowercase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_lowercase )
def _check_local_values(_lowercase : Union[str, Any] , _lowercase : Any ):
_lowercase = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _lowercase , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , _lowercase , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] ) | 227 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''lxmert'''
_UpperCAmelCase = {}
def __init__( self , snake_case=30522 , snake_case=768 , snake_case=12 , snake_case=9500 , snake_case=1600 , snake_case=400 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-12 , snake_case=9 , snake_case=5 , snake_case=5 , snake_case=2048 , snake_case=4 , snake_case=6.67 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , **snake_case , ) -> Optional[int]:
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = num_qa_labels
_UpperCAmelCase = num_object_labels
_UpperCAmelCase = num_attr_labels
_UpperCAmelCase = l_layers
_UpperCAmelCase = x_layers
_UpperCAmelCase = r_layers
_UpperCAmelCase = visual_feat_dim
_UpperCAmelCase = visual_pos_dim
_UpperCAmelCase = visual_loss_normalizer
_UpperCAmelCase = task_matched
_UpperCAmelCase = task_mask_lm
_UpperCAmelCase = task_obj_predict
_UpperCAmelCase = task_qa
_UpperCAmelCase = visual_obj_loss
_UpperCAmelCase = visual_attr_loss
_UpperCAmelCase = visual_feat_loss
_UpperCAmelCase = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**snake_case )
| 573 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
lowercase = logging.getLogger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''masked_bert'''
def __init__( self , snake_case=30522 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-12 , snake_case=0 , snake_case="topK" , snake_case="constant" , snake_case=0.0 , **snake_case , ) -> str:
super().__init__(pad_token_id=snake_case , **snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = pruning_method
_UpperCAmelCase = mask_init
_UpperCAmelCase = mask_scale
| 573 | 1 |
'''simple docstring'''
from __future__ import annotations
import queue
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : str = data
_UpperCAmelCase : Any = None
_UpperCAmelCase : Tuple = None
def __lowerCAmelCase ():
print("\n********Press N to stop entering at any point of time********\n" )
_UpperCAmelCase : Optional[Any] = input("Enter the value of the root node: " ).strip().lower()
_UpperCAmelCase : queue.Queue = queue.Queue()
_UpperCAmelCase : List[Any] = TreeNode(int(__lowerCAmelCase ) )
q.put(__lowerCAmelCase )
while not q.empty():
_UpperCAmelCase : Tuple = q.get()
_UpperCAmelCase : Union[str, Any] = F"""Enter the left node of {node_found.data}: """
_UpperCAmelCase : Any = input(__lowerCAmelCase ).strip().lower() or "n"
if check == "n":
return tree_node
_UpperCAmelCase : List[Any] = TreeNode(int(__lowerCAmelCase ) )
_UpperCAmelCase : List[Any] = left_node
q.put(__lowerCAmelCase )
_UpperCAmelCase : Any = F"""Enter the right node of {node_found.data}: """
_UpperCAmelCase : str = input(__lowerCAmelCase ).strip().lower() or "n"
if check == "n":
return tree_node
_UpperCAmelCase : Dict = TreeNode(int(__lowerCAmelCase ) )
_UpperCAmelCase : List[str] = right_node
q.put(__lowerCAmelCase )
raise
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not node:
return
print(node.data , end="," )
pre_order(node.left )
pre_order(node.right )
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not node:
return
in_order(node.left )
print(node.data , end="," )
in_order(node.right )
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end="," )
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not node:
return
_UpperCAmelCase : queue.Queue = queue.Queue()
q.put(__lowerCAmelCase )
while not q.empty():
_UpperCAmelCase : List[str] = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not node:
return
_UpperCAmelCase : queue.Queue = queue.Queue()
q.put(__lowerCAmelCase )
while not q.empty():
_UpperCAmelCase : Optional[int] = []
while not q.empty():
_UpperCAmelCase : List[Any] = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not node:
return
_UpperCAmelCase : list[TreeNode] = []
_UpperCAmelCase : List[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end="," )
stack.append(__lowerCAmelCase )
_UpperCAmelCase : Any = n.left
# end of while means current node doesn't have left child
_UpperCAmelCase : str = stack.pop()
# start to traverse its right child
_UpperCAmelCase : List[Any] = n.right
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not node:
return
_UpperCAmelCase : list[TreeNode] = []
_UpperCAmelCase : Optional[Any] = node
while n or stack:
while n:
stack.append(__lowerCAmelCase )
_UpperCAmelCase : int = n.left
_UpperCAmelCase : int = stack.pop()
print(n.data , end="," )
_UpperCAmelCase : int = n.right
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not node:
return
_UpperCAmelCase , _UpperCAmelCase : Dict = [], []
_UpperCAmelCase : Any = node
stacka.append(__lowerCAmelCase )
while stacka: # to find the reversed order of post order, store it in stack2
_UpperCAmelCase : List[Any] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__lowerCAmelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end="," )
def __lowerCAmelCase (__lowerCAmelCase = "" , __lowerCAmelCase=50 , __lowerCAmelCase="*" ):
if not s:
return "\n" + width * char
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = divmod(width - len(__lowerCAmelCase ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
lowerCamelCase__ = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 40 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 40 | 1 |
import math
class SCREAMING_SNAKE_CASE :
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : list[list[float]] , a : list[int] )-> int:
"""simple docstring"""
lowercase__ = 0.0
lowercase__ = 0.0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def SCREAMING_SNAKE_CASE_ ( self : Any , a : list[list[int | float]] , a : list[int] , a : int , a : float )-> list[list[int | float]]:
"""simple docstring"""
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def __UpperCamelCase () -> None:
# Training Examples ( m, n )
lowercase__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
lowercase__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
lowercase__ = SelfOrganizingMap()
lowercase__ = 3
lowercase__ = 0.5
for _ in range(__snake_case ):
for j in range(len(__snake_case ) ):
# training sample
lowercase__ = training_samples[j]
# Compute the winning vector
lowercase__ = self_organizing_map.get_winner(__snake_case , __snake_case )
# Update the winning vector
lowercase__ = self_organizing_map.update(__snake_case , __snake_case , __snake_case , __snake_case )
# classify test sample
lowercase__ = [0, 0, 0, 1]
lowercase__ = self_organizing_map.get_winner(__snake_case , __snake_case )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 235 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def _lowercase ( ) -> None:
assert or_gate(0 ,0 ) == 0
assert or_gate(0 ,1 ) == 1
assert or_gate(1 ,0 ) == 1
assert or_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1)) | 293 | 0 |
"""simple docstring"""
def a_ ( lowerCamelCase ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 632 | """simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCAmelCase__ : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCAmelCase__ : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowerCAmelCase__ : set[int] = {ord(char) for char in VALID_CHARS}
lowerCAmelCase__ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = ""
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
for keychar, cipherchar in zip(cycle(lowerCamelCase ) , lowerCamelCase ):
UpperCAmelCase__ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowerCamelCase )
return decoded
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = []
for key in product(lowerCamelCase , repeat=3 ):
UpperCAmelCase__ = try_key(lowerCamelCase , lowerCamelCase )
if encoded is not None:
possibles.append(lowerCamelCase )
return possibles
def a_ ( lowerCamelCase , lowerCamelCase ):
return [possible for possible in possibles if common_word in possible.lower()]
def a_ ( lowerCamelCase = "p059_cipher.txt" ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = Path(lowerCamelCase ).parent.joinpath(lowerCamelCase ).read_text(encoding='utf-8' )
UpperCAmelCase__ = [int(lowerCamelCase ) for number in data.strip().split(',' )]
UpperCAmelCase__ = filter_valid_chars(lowerCamelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase__ = filter_common_word(lowerCamelCase , lowerCamelCase )
if len(lowerCamelCase ) == 1:
break
UpperCAmelCase__ = possibles[0]
return sum(ord(lowerCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 632 | 1 |
'''simple docstring'''
import cva
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
if k in (0.04, 0.06):
UpperCamelCase : Optional[int] = k
UpperCamelCase : Union[str, Any] = window_size
else:
raise ValueError("invalid k value" )
def __str__( self ) -> str:
'''simple docstring'''
return str(self.k )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase ) -> tuple[cva.Mat, list[list[int]]]:
'''simple docstring'''
UpperCamelCase : Any = cva.imread(lowerCamelCase , 0 )
UpperCamelCase , UpperCamelCase : List[Any] = img.shape
UpperCamelCase : list[list[int]] = []
UpperCamelCase : List[str] = img.copy()
UpperCamelCase : List[str] = cva.cvtColor(lowerCamelCase , cva.COLOR_GRAY2RGB )
UpperCamelCase , UpperCamelCase : List[Any] = np.gradient(lowerCamelCase )
UpperCamelCase : Any = dx**2
UpperCamelCase : Optional[Any] = dy**2
UpperCamelCase : Any = dx * dy
UpperCamelCase : Any = 0.04
UpperCamelCase : List[Any] = self.window_size // 2
for y in range(lowerCamelCase , h - offset ):
for x in range(lowerCamelCase , w - offset ):
UpperCamelCase : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase : List[str] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase : Optional[int] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase : List[Any] = (wxx * wyy) - (wxy**2)
UpperCamelCase : Optional[int] = wxx + wyy
UpperCamelCase : Tuple = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase_ = HarrisCorner(0.04, 3)
lowerCAmelCase_ , lowerCAmelCase_ = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 173 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
UpperCamelCase : List[Any] = VideoClassificationPipeline(model=lowerCamelCase , image_processor=lowerCamelCase , top_k=2 )
UpperCamelCase : List[str] = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase ) -> List[Any]:
'''simple docstring'''
for example in examples:
UpperCamelCase : int = video_classifier(lowerCamelCase )
self.assertEqual(
lowerCamelCase , [
{"score": ANY(lowerCamelCase ), "label": ANY(lowerCamelCase )},
{"score": ANY(lowerCamelCase ), "label": ANY(lowerCamelCase )},
] , )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
UpperCamelCase : List[Any] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
UpperCamelCase : Any = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
UpperCamelCase : List[str] = pipeline(
"video-classification" , model=lowerCamelCase , feature_extractor=lowerCamelCase , frame_sampling_rate=4 )
UpperCamelCase : int = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
UpperCamelCase : List[str] = video_classifier(lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , )
UpperCamelCase : List[Any] = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
| 173 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily switch the padder over to mel-spectrogram targets.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
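# Usage sketch (added, not part of the original module; assumes the
# `microsoft/speecht5_tts` checkpoint is reachable on the Hub):
#
#   from transformers import SpeechT5Processor
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
#   # `inputs["input_ids"]` now holds the tokenized text; passing `audio=`
#   # instead would route the call through the feature extractor.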
| 710 |
"""simple docstring"""
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
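    # Added sanity check: the pure-Python implementation should agree with the
    # standard library on a simple round trip.
    import base64 as stdlib_base64

    sample = b"Hello World!"
    assert base64_encode(sample) == stdlib_base64.b64encode(sample)
    assert base64_decode(base64_encode(sample).decode()) == sample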
| 477 | 0 |
'''simple docstring'''
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """Return True if some subset of `arr` sums to exactly `required_sum`."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
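    # Added sanity checks: {4, 5} sums to 9, while no subset of the list
    # reaches 30.
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
    assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)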
| 638 |
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
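# Usage sketch (added; the URL and hash below are illustrative placeholders):
#
#   expected = {"https://example.com/data.csv": {"num_bytes": 10, "checksum": "abc123"}}
#   recorded = {"https://example.com/data.csv": {"num_bytes": 10, "checksum": "abc123"}}
#   verify_checksums(expected, recorded)  # logs "All the checksums matched successfully"
#   verify_checksums(expected, {})        # raises ExpectedMoreDownloadedFiles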
| 638 | 1 |
g = 9.80665  # standard gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force on an object fully submerged in a fluid."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
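    # Added example: an object displacing 0.5 m^3 of fresh water (~1000 kg/m^3)
    # feels a buoyant force of about 1000 * 9.80665 * 0.5, i.e. roughly 4903 N.
    print(archimedes_principle(fluid_density=1000, volume=0.5))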
| 236 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a `pyspark.sql.DataFrame`."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
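# Usage sketch (added; assumes an active `SparkSession` bound to `spark`):
#
#   df = spark.range(100)
#   ds = SparkDatasetReader(df, streaming=False).read()
#   print(ds)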
| 236 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
a_ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __lowerCAmelCase ( A_ : List[Any] , A_ : int , A_ : List[Any] , A_ : Union[str, Any] , A_ : str ) -> Optional[Any]:
for attribute in key.split("." ):
__UpperCAmelCase = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
__UpperCAmelCase = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
__UpperCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__UpperCAmelCase = value
elif weight_type == "weight_g":
__UpperCAmelCase = value
elif weight_type == "weight_v":
__UpperCAmelCase = value
elif weight_type == "bias":
__UpperCAmelCase = value
else:
__UpperCAmelCase = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCAmelCase ( A_ : List[Any] , A_ : str ) -> List[Any]:
__UpperCAmelCase = []
__UpperCAmelCase = fairseq_model.state_dict()
__UpperCAmelCase = hf_model.feature_extractor
__UpperCAmelCase = hf_model.adapter
for name, value in fairseq_dict.items():
__UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == "group" , )
__UpperCAmelCase = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__UpperCAmelCase = True
if "*" in mapped_key:
__UpperCAmelCase = name.split(UpperCamelCase_ )[0].split("." )[-2]
__UpperCAmelCase = mapped_key.replace("*" , UpperCamelCase_ )
if "weight_g" in name:
__UpperCAmelCase = """weight_g"""
elif "weight_v" in name:
__UpperCAmelCase = """weight_v"""
elif "bias" in name:
__UpperCAmelCase = """bias"""
elif "weight" in name:
__UpperCAmelCase = """weight"""
else:
__UpperCAmelCase = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCAmelCase ( A_ : List[Any] , A_ : Tuple , A_ : Any , A_ : Dict , A_ : int ) -> str:
__UpperCAmelCase = full_name.split("conv_layers." )[-1]
__UpperCAmelCase = name.split("." )
__UpperCAmelCase = int(items[0] )
__UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__UpperCAmelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__UpperCAmelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__UpperCAmelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__UpperCAmelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCamelCase_ )
def __lowerCAmelCase ( A_ : Union[str, Any] , A_ : Any , A_ : List[str] , A_ : Dict ) -> Optional[int]:
__UpperCAmelCase = full_name.split("adaptor." )[-1]
__UpperCAmelCase = name.split("." )
if items[1].isdigit():
__UpperCAmelCase = int(items[1] )
else:
__UpperCAmelCase = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
__UpperCAmelCase = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
__UpperCAmelCase = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
__UpperCAmelCase = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
__UpperCAmelCase = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
__UpperCAmelCase = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
__UpperCAmelCase = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCamelCase_ )
def __lowerCAmelCase ( A_ : List[Any] ) -> str:
__UpperCAmelCase = emb.weight.shape
__UpperCAmelCase = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
__UpperCAmelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def __lowerCAmelCase ( A_ : List[str] , A_ : int , A_ : List[str] , A_ : str , A_ : List[str] , A_ : Optional[Any] , A_ : Dict , A_ : List[Any] , A_ : Optional[Any] , A_ : List[str] , A_ : int , ) -> str:
__UpperCAmelCase = WavaVecaConfig.from_pretrained(
UpperCamelCase_ , add_adapter=UpperCamelCase_ , adapter_stride=UpperCamelCase_ , adapter_kernel_size=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , output_hidden_size=UpperCamelCase_ , )
__UpperCAmelCase = MBartConfig.from_pretrained(UpperCamelCase_ )
# load model
__UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
__UpperCAmelCase = model[0].eval()
# load feature extractor
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ , use_auth_token=UpperCamelCase_ )
# set weights for wav2vec2 encoder
__UpperCAmelCase = WavaVecaModel(UpperCamelCase_ )
recursively_load_weights_wavaveca(model.encoder , UpperCamelCase_ )
# load decoder weights
__UpperCAmelCase = MBartForCausalLM(UpperCamelCase_ )
__UpperCAmelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase_ )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
__UpperCAmelCase = SpeechEncoderDecoderModel(encoder=UpperCamelCase_ , decoder=UpperCamelCase_ )
__UpperCAmelCase = False
__UpperCAmelCase = MBartaaTokenizer(UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase = hf_wavavec.config.to_dict()
__UpperCAmelCase = tokenizer.pad_token_id
__UpperCAmelCase = tokenizer.bos_token_id
__UpperCAmelCase = tokenizer.eos_token_id
__UpperCAmelCase = """mbart50"""
__UpperCAmelCase = """wav2vec2"""
__UpperCAmelCase = tokenizer.eos_token_id
__UpperCAmelCase = 25_00_04
__UpperCAmelCase = tokenizer.eos_token_id
__UpperCAmelCase = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase_ )
hf_wavavec.save_pretrained(UpperCamelCase_ )
feature_extractor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
a_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
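# Example invocation (added; the script name is illustrative, and every path is
# a placeholder for a local fairseq checkpoint and its dictionary/config files):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50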
| 221 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
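    # Added spot check: Western Easter in 2000 fell on 23 April, which the
    # Gauss formula above reproduces.
    assert gauss_easter(2000) == datetime(2000, 4, 23)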
| 429 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
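# Usage sketch (added): with the defaults above, a sparse MLP replaces the dense
# one every `num_layers // num_sparse_encoder_layers` blocks.
#
#   config = SwitchTransformersConfig(num_experts=8, expert_capacity=64)
#   print(config.encoder_sparse_step)  # 4, i.e. every fourth of the 12 encoder layers is sparse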
| 327 |
"""simple docstring"""
from math import sqrt
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> int:
SCREAMING_SNAKE_CASE = 0
for i in range(1 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) ):
if n % i == 0 and i != sqrt(SCREAMING_SNAKE_CASE_ ):
total += i + n // i
elif i == sqrt(SCREAMING_SNAKE_CASE_ ):
total += i
return total - n
def lowercase (SCREAMING_SNAKE_CASE_ : int = 1_00_00 ) -> int:
SCREAMING_SNAKE_CASE = sum(
i
for i in range(1 , SCREAMING_SNAKE_CASE_ )
if sum_of_divisors(sum_of_divisors(SCREAMING_SNAKE_CASE_ ) ) == i and sum_of_divisors(SCREAMING_SNAKE_CASE_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
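    # Added spot check (runs after the prompt above): 220 and 284 form the
    # smallest amicable pair, so each proper-divisor sum maps onto the other.
    assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220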
| 327 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def __magic_name__ ( self ) -> str:
torch.manual_seed(0 )
__a : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __magic_name__ ( self ) -> Dict:
__a : Optional[int] = self.dummy_uncond_unet
__a : Optional[Any] = KarrasVeScheduler()
__a : Tuple = KarrasVePipeline(unet=_A , scheduler=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__a : Tuple = torch.manual_seed(0 )
__a : Tuple = pipe(num_inference_steps=2 , generator=_A , output_type='numpy' ).images
__a : Optional[int] = torch.manual_seed(0 )
__a : Any = pipe(num_inference_steps=2 , generator=_A , output_type='numpy' , return_dict=_A )[0]
__a : Union[str, Any] = image[0, -3:, -3:, -1]
__a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self ) -> Dict:
__a : int = 'google/ncsnpp-celebahq-256'
__a : int = UNetaDModel.from_pretrained(_A )
__a : Tuple = KarrasVeScheduler()
__a : Tuple = KarrasVePipeline(unet=_A , scheduler=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__a : Dict = torch.manual_seed(0 )
__a : str = pipe(num_inference_steps=20 , generator=_A , output_type='numpy' ).images
__a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__a : Optional[Any] = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 597 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
SCREAMING_SNAKE_CASE_ = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = " Hello world! cécé herlolip"
SCREAMING_SNAKE_CASE_ = [
("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
__a : Dict = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a : Dict = dct.pop(SCREAMING_SNAKE_CASE__ )
__a : Dict = val
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
__a : Dict = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
__a : Dict = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval()
hub_interface.model.load_state_dict(sd['model'] )
return hub_interface
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
__a , __a : Dict = emb.weight.shape
__a : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
__a : Tuple = torch.hub.load('pytorch/fairseq' , SCREAMING_SNAKE_CASE__ ).eval()
else:
__a : Optional[int] = load_xsum_checkpoint(SCREAMING_SNAKE_CASE__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__a : List[str] = checkpoint_path.replace('.' , '-' )
__a : Optional[Any] = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = bart.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
__a : List[str] = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).encode(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).unsqueeze(0 )
if not torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).all():
raise ValueError(
f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
if checkpoint_path == "bart.large.mnli":
__a : List[Any] = bart.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
__a : str = state_dict['model.decoder.embed_tokens.weight']
for src, dest in mnli_rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Dict = BartForSequenceClassification(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__a : Any = bart.predict('mnli' , SCREAMING_SNAKE_CASE__ , return_logits=SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = model(SCREAMING_SNAKE_CASE__ )[0] # logits
else: # no classification heads to worry about
__a : Dict = bart.model.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = state_dict['decoder.embed_tokens.weight']
__a : List[Any] = bart.extract_features(SCREAMING_SNAKE_CASE__ )
if hf_checkpoint_name == "facebook/bart-large":
__a : Dict = BartModel(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__a : str = model(SCREAMING_SNAKE_CASE__ ).model[0]
else:
__a : Optional[Any] = BartForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(SCREAMING_SNAKE_CASE__ )
if hasattr(SCREAMING_SNAKE_CASE__ , 'lm_head' ):
__a : Optional[int] = make_linear_from_emb(model.model.shared )
__a : List[Any] = model.model(SCREAMING_SNAKE_CASE__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
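# Example invocation (added; the script name is illustrative, and
# "bart.large.cnn" is fetched through the fairseq hub on first use):
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large.cnn ./bart-large-cnn --hf_config facebook/bart-large-cnn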
| 597 | 1 |
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb `number_of_steps` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
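    # Added spot check: there are 5 distinct ways to climb 4 stairs with steps
    # of size 1 or 2 (1111, 112, 121, 211, 22).
    assert climb_stairs(4) == 5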
| 192 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : List[Any]) -> Dict:
__snake_case : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(_A)
__snake_case : Union[str, Any] = -1
__snake_case : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
__snake_case : Any = model.generate(_A , max_new_tokens=10 , do_sample=_A)
__snake_case : str = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
__snake_case : List[str] = TextStreamer(_A)
model.generate(_A , max_new_tokens=10 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__snake_case : Optional[Any] = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowercase (self : int) -> Optional[Any]:
__snake_case : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(_A)
__snake_case : Optional[int] = -1
__snake_case : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
__snake_case : int = model.generate(_A , max_new_tokens=10 , do_sample=_A)
__snake_case : int = tokenizer.decode(greedy_ids[0])
__snake_case : int = TextIteratorStreamer(_A)
__snake_case : List[Any] = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__snake_case : Any = Thread(target=model.generate , kwargs=_A)
thread.start()
__snake_case : Dict = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_A , _A)
def _lowercase (self : Any) -> List[str]:
__snake_case : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(_A)
__snake_case : Union[str, Any] = -1
__snake_case : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
__snake_case : Optional[int] = model.generate(_A , max_new_tokens=10 , do_sample=_A)
__snake_case : Union[str, Any] = greedy_ids[:, input_ids.shape[1] :]
__snake_case : Optional[Any] = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
__snake_case : str = TextStreamer(_A , skip_prompt=_A)
model.generate(_A , max_new_tokens=10 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__snake_case : List[str] = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowercase (self : List[str]) -> Dict:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__snake_case : int = AutoTokenizer.from_pretrained('distilgpt2')
__snake_case : Dict = AutoModelForCausalLM.from_pretrained('distilgpt2').to(_A)
__snake_case : Union[str, Any] = -1
__snake_case : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__snake_case : Tuple = TextStreamer(_A , skip_special_tokens=_A)
model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__snake_case : Tuple = cs.out[:-1] # Remove the final "\n"
__snake_case : int = tokenizer(_A , return_tensors='pt')
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _lowercase (self : Optional[int]) -> List[str]:
__snake_case : Optional[int] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(_A)
__snake_case : int = -1
__snake_case : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
__snake_case : List[Any] = TextIteratorStreamer(_A , timeout=0.001)
__snake_case : str = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__snake_case : str = Thread(target=model.generate , kwargs=_A)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_A):
__snake_case : Any = ''
for new_text in streamer:
streamer_text += new_text
| 192 | 1 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
snake_case_ : Tuple = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : bool , __magic_name__ : str = None , __magic_name__ : list = None ) -> Dict:
lowerCamelCase_ : str = None
lowerCamelCase_ : str = os.path.abspath(os.path.join("examples" , "by_feature" ) )
lowerCamelCase_ : Union[str, Any] = os.path.abspath("examples" )
for item in os.listdir(__magic_name__ ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase_ : Union[str, Any] = os.path.join(__magic_name__ , __magic_name__ )
if os.path.isfile(__magic_name__ ) and ".py" in item_path:
with self.subTest(
tested_script=__magic_name__ , feature_script=__magic_name__ , tested_section="main()" if parser_only else "training_function()" , ):
lowerCamelCase_ : List[str] = compare_against_test(
os.path.join(__magic_name__ , __magic_name__ ) , __magic_name__ , __magic_name__ , __magic_name__ )
lowerCamelCase_ : Optional[Any] = "\n".join(__magic_name__ )
if special_strings is not None:
for string in special_strings:
lowerCamelCase_ : str = diff.replace(__magic_name__ , "" )
self.assertEqual(__magic_name__ , "" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
self.one_complete_example("complete_nlp_example.py" , __magic_name__ )
self.one_complete_example("complete_nlp_example.py" , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
lowerCamelCase_ : Dict = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
lowerCamelCase_ : List[Any] = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , __magic_name__ , __magic_name__ , __magic_name__ )
self.one_complete_example("complete_cv_example.py" , __magic_name__ , __magic_name__ , __magic_name__ )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = False
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Union[str, Any]:
super().setUpClass()
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : int = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
lowerCamelCase_ : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : str ) -> Any:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
lowerCamelCase_ : Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
lowerCamelCase_ : Dict = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase_ : int = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
lowerCamelCase_ : List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase_ : Optional[Any] = run_command(self._launch_args + testargs , return_stdout=__magic_name__ )
self.assertNotIn("epoch 0:" , __magic_name__ )
self.assertIn("epoch 1:" , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
lowerCamelCase_ : Optional[int] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase_ : str = run_command(self._launch_args + testargs , return_stdout=__magic_name__ )
if torch.cuda.is_available():
lowerCamelCase_ : int = torch.cuda.device_count()
else:
lowerCamelCase_ : List[str] = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , __magic_name__ )
self.assertIn("epoch 1:" , __magic_name__ )
else:
self.assertIn("epoch 0:" , __magic_name__ )
self.assertIn("epoch 1:" , __magic_name__ )
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
lowerCamelCase_ : Dict = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
lowerCamelCase_ : Optional[Any] = run_command(self._launch_args + testargs , return_stdout=__magic_name__ )
lowerCamelCase_ : Any = re.findall("({.+})" , __magic_name__ )
lowerCamelCase_ : Any = [r for r in results if "accuracy" in r][-1]
lowerCamelCase_ : str = ast.literal_eval(__magic_name__ )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
lowerCamelCase_ : Dict = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase_ : Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__magic_name__ , "tracking" ) ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
lowerCamelCase_ : List[Any] = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
lowerCamelCase_ : Tuple = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 488 |
def solution(n: int = 100) -> int:
    """Count the distinct terms generated by a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 488 | 1 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 494 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}
def __A ( self , a__ , a__ , a__=1 ):
if self.graph.get(a__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_UpperCAmelCase = [[w, v]]
if not self.graph.get(a__ ):
_UpperCAmelCase = []
def __A ( self ):
return list(self.graph )
def __A ( self , a__ , a__ ):
if self.graph.get(a__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(a__ )
def __A ( self , a__=-2 , a__=-1 ):
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(a__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(a__ ) == 0:
return visited
def __A ( self , a__=-1 ):
if c == -1:
_UpperCAmelCase = floor(random() * 1_00_00 ) + 10
for i in range(a__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(a__ , a__ , 1 )
def __A ( self , a__=-2 ):
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(a__ )
visited.append(a__ )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __A ( self , a__ ):
_UpperCAmelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __A ( self , a__ ):
return len(self.graph[u] )
def __A ( self , a__=-2 ):
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = s
_UpperCAmelCase = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(a__ ) == 0:
return sorted_nodes
def __A ( self ):
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(a__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(a__ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(a__ ) == 0:
return list(a__ )
def __A ( self ):
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(a__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(a__ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(a__ ) == 0:
return False
def __A ( self , a__=-2 , a__=-1 ):
_UpperCAmelCase = time()
self.dfs(a__ , a__ )
_UpperCAmelCase = time()
return end - begin
def __A ( self , a__=-2 ):
_UpperCAmelCase = time()
self.bfs(a__ )
_UpperCAmelCase = time()
return end - begin
class Graph:
    def __init__(self):
        self.graph = {}
def __A ( self , a__ , a__ , a__=1 ):
# check if the u exists
if self.graph.get(a__ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_UpperCAmelCase = [[w, v]]
# add the other way
if self.graph.get(a__ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
_UpperCAmelCase = [[w, u]]
def __A ( self , a__ , a__ ):
if self.graph.get(a__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(a__ )
# the other way round
if self.graph.get(a__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(a__ )
def __A ( self , a__=-2 , a__=-1 ):
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(a__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(a__ ) == 0:
return visited
def __A ( self , a__=-1 ):
if c == -1:
_UpperCAmelCase = floor(random() * 1_00_00 ) + 10
for i in range(a__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(a__ , a__ , 1 )
def __A ( self , a__=-2 ):
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(a__ )
visited.append(a__ )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __A ( self , a__ ):
return len(self.graph[u] )
def __A ( self ):
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(a__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(a__ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
# check if we have reached the starting point
if len(a__ ) == 0:
return list(a__ )
def __A ( self ):
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(a__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(a__ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
# check if we have reached the starting point
if len(a__ ) == 0:
return False
def __A ( self ):
return list(self.graph )
def __A ( self , a__=-2 , a__=-1 ):
_UpperCAmelCase = time()
self.dfs(a__ , a__ )
_UpperCAmelCase = time()
return end - begin
def __A ( self , a__=-2 ):
_UpperCAmelCase = time()
self.bfs(a__ )
_UpperCAmelCase = time()
return end - begin
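# --- Added note: the traversal methods above implement DFS path search,
# random-graph generation, BFS, cycle collection, and a boolean cycle check,
# but the mangled assignment targets obscure the logic. A minimal, runnable
# sketch of the undirected cycle check (naming is mine, not the source's):
def _has_cycle_sketch(graph: dict) -> bool:
    """Return True if the adjacency map (edges stored as [w, v]) has a cycle."""
    visited: set = set()
    for root in graph:
        if root in visited:
            continue
        stack = [(root, None)]  # (vertex, parent)
        while stack:
            vertex, parent = stack.pop()
            if vertex in visited:
                continue
            visited.add(vertex)
            for _w, neighbor in graph.get(vertex, []):
                if neighbor not in visited:
                    stack.append((neighbor, vertex))
                elif neighbor != parent:
                    return True  # back edge to a non-parent vertex => cycle
    return False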
| 494 | 1 |
import math


def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its positive root is the square root of ``a``."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Repeated squaring from 2.0 yields a starting point above sqrt(a)."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Newton's method: x <- x - f(x) / f'(x) until successive iterates agree."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
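# Quick sanity check for the iteration above (expected values computed by me,
# not taken from the source): the method converges to the square root of ``a``.
if __name__ == "__main__":
    assert abs(square_root_iterative(4) - 2.0) < 1e-6
    assert abs(square_root_iterative(3.2) - 1.7888543819998317) < 1e-6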
| 511 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
_UpperCamelCase = logging.getLogger(__name__)
class __magic_name__ ( lowerCAmelCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
'''simple docstring'''
__A : List[Any] = self.layer[current_layer](lowerCamelCase , lowerCamelCase , head_mask[current_layer] )
__A : Tuple = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , lowerCAmelCase , )
class __magic_name__ ( lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase ):
'''simple docstring'''
super().__init__(lowerCamelCase )
__A : Any = BertEncoderWithPabee(lowerCamelCase )
self.init_weights()
__A : Optional[Any] = 0
__A : Union[str, Any] = 0
__A : Dict = 0
__A : List[Any] = 0
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Any = threshold
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : List[Any] = patience
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : List[str] = 0
__A : int = 0
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[Any] = self.inference_layers_num / self.inference_instances_num
__A : int = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(lowerCamelCase )
@add_start_docstrings_to_model_forward(lowerCamelCase )
def lowerCAmelCase__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__A : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
__A : Any = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__A : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__A : Any = torch.ones(lowerCamelCase , device=lowerCamelCase )
if token_type_ids is None:
__A : List[Any] = torch.zeros(lowerCamelCase , dtype=torch.long , device=lowerCamelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__A : torch.Tensor = self.get_extended_attention_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__A ,__A ,__A : Tuple = encoder_hidden_states.size()
__A : Any = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__A : List[str] = torch.ones(lowerCamelCase , device=lowerCamelCase )
__A : Dict = self.invert_attention_mask(lowerCamelCase )
else:
__A : Union[str, Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__A : Any = self.get_head_mask(lowerCamelCase , self.config.num_hidden_layers )
__A : Tuple = self.embeddings(
input_ids=lowerCamelCase , position_ids=lowerCamelCase , token_type_ids=lowerCamelCase , inputs_embeds=lowerCamelCase )
__A : str = embedding_output
if self.training:
__A : Dict = []
for i in range(self.config.num_hidden_layers ):
__A : str = self.encoder.adaptive_forward(
lowerCamelCase , current_layer=lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase )
__A : Optional[Any] = self.pooler(lowerCamelCase )
__A : Optional[Any] = output_layers[i](output_dropout(lowerCamelCase ) )
res.append(lowerCamelCase )
elif self.patience == 0: # Use all layers for inference
__A : int = self.encoder(
lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__A : Optional[Any] = self.pooler(encoder_outputs[0] )
__A : List[str] = [output_layers[self.config.num_hidden_layers - 1](lowerCamelCase )]
else:
__A : int = 0
__A : List[str] = None
__A : Optional[int] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__A : List[str] = self.encoder.adaptive_forward(
lowerCamelCase , current_layer=lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase )
__A : List[str] = self.pooler(lowerCamelCase )
__A : str = output_layers[i](lowerCamelCase )
if regression:
__A : int = logits.detach()
if patient_result is not None:
__A : Union[str, Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__A : Any = 0
else:
__A : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__A : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowerCamelCase ) ):
patient_counter += 1
else:
__A : Union[str, Any] = 0
__A : int = logits
if patient_counter == self.patience:
break
__A : List[str] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , lowerCAmelCase , )
class __magic_name__ ( lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase ):
'''simple docstring'''
super().__init__(lowerCamelCase )
__A : Union[str, Any] = config.num_labels
__A : Union[str, Any] = BertModelWithPabee(lowerCamelCase )
__A : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob )
__A : int = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCamelCase )
def lowerCAmelCase__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
'''simple docstring'''
__A : Optional[Any] = self.bert(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , position_ids=lowerCamelCase , head_mask=lowerCamelCase , inputs_embeds=lowerCamelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__A : Any = (logits[-1],)
if labels is not None:
__A : Optional[Any] = None
__A : int = 0
for ix, logits_item in enumerate(lowerCamelCase ):
if self.num_labels == 1:
# We are doing regression
__A : Union[str, Any] = MSELoss()
__A : str = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__A : Optional[int] = CrossEntropyLoss()
__A : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__A : str = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__A : Tuple = (total_loss / total_weights,) + outputs
return outputs
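# --- Added note: a hedged, self-contained sketch of the PABEE early-exit rule
# that the forward pass above implements (my naming, not the source's API):
# per-layer classifiers are evaluated in order, and inference stops once
# `patience` consecutive layers agree on the argmax prediction.
import torch


def pabee_early_exit(per_layer_logits, patience):
    patient_counter = 0
    patient_prediction = None
    exit_layer = 0
    for exit_layer, logits in enumerate(per_layer_logits, start=1):
        prediction = logits.argmax(dim=1)
        if patient_prediction is not None and torch.all(prediction.eq(patient_prediction)):
            patient_counter += 1
        else:
            patient_counter = 0
        patient_prediction = prediction
        if patient_counter == patience:
            break  # enough consecutive layers agree; skip the remaining layers
    return patient_prediction, exit_layer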
| 111 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Union[str, Any] = """longformer"""
def __init__( self :str , __lowercase :Union[List[int], int] = 512 , __lowercase :int = 2 , __lowercase :int = 1 , __lowercase :int = 0 , __lowercase :int = 2 , __lowercase :int = 3_0522 , __lowercase :int = 768 , __lowercase :int = 12 , __lowercase :int = 12 , __lowercase :int = 3072 , __lowercase :str = "gelu" , __lowercase :float = 0.1 , __lowercase :float = 0.1 , __lowercase :int = 512 , __lowercase :int = 2 , __lowercase :float = 0.02 , __lowercase :float = 1e-1_2 , __lowercase :bool = False , **__lowercase :Any , ):
super().__init__(pad_token_id=__lowercase , **__lowercase )
__lowerCamelCase : Optional[Any] =attention_window
__lowerCamelCase : Union[str, Any] =sep_token_id
__lowerCamelCase : Tuple =bos_token_id
__lowerCamelCase : Tuple =eos_token_id
__lowerCamelCase : Optional[Any] =vocab_size
__lowerCamelCase : Tuple =hidden_size
__lowerCamelCase : List[Any] =num_hidden_layers
__lowerCamelCase : Optional[int] =num_attention_heads
__lowerCamelCase : List[Any] =hidden_act
__lowerCamelCase : Optional[Any] =intermediate_size
__lowerCamelCase : Optional[int] =hidden_dropout_prob
__lowerCamelCase : Tuple =attention_probs_dropout_prob
__lowerCamelCase : List[Any] =max_position_embeddings
__lowerCamelCase : Tuple =type_vocab_size
__lowerCamelCase : Dict =initializer_range
__lowerCamelCase : List[Any] =layer_norm_eps
__lowerCamelCase : Any =onnx_export
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def __init__( self :List[str] , __lowercase :"PretrainedConfig" , __lowercase :str = "default" , __lowercase :"List[PatchingSpec]" = None ):
super().__init__(__lowercase , __lowercase , __lowercase )
__lowerCamelCase : Optional[Any] =True
@property
def __lowercase ( self :int ):
if self.task == "multiple-choice":
__lowerCamelCase : Dict ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowerCamelCase : Optional[Any] ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def __lowercase ( self :List[str] ):
__lowerCamelCase : Optional[int] =super().outputs
if self.task == "default":
__lowerCamelCase : Tuple ={0: '''batch'''}
return outputs
@property
def __lowercase ( self :Tuple ):
return 1e-4
@property
def __lowercase ( self :Optional[int] ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def __lowercase ( self :Any , __lowercase :"PreTrainedTokenizerBase" , __lowercase :int = -1 , __lowercase :int = -1 , __lowercase :bool = False , __lowercase :Optional[TensorType] = None , ):
__lowerCamelCase : Optional[Any] =super().generate_dummy_inputs(
preprocessor=__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
__lowerCamelCase : Tuple =torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
__lowerCamelCase : List[str] =1
return inputs
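# --- Added note: the mangled assignment above stands in for marking every
# second token as global; a hedged reconstruction of that step (dummy shapes
# are mine):
#
#     global_attention_mask = torch.zeros_like(inputs["input_ids"])
#     global_attention_mask[:, ::2] = 1  # every second token attends globally
#     inputs["global_attention_mask"] = global_attention_mask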
| 363 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Union[str, Any] = """time_series_transformer"""
__snake_case : Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self :List[Any] , __lowercase :Optional[int] = None , __lowercase :Optional[int] = None , __lowercase :str = "student_t" , __lowercase :str = "nll" , __lowercase :int = 1 , __lowercase :List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase :Optional[Union[str, bool]] = "mean" , __lowercase :int = 0 , __lowercase :int = 0 , __lowercase :int = 0 , __lowercase :int = 0 , __lowercase :Optional[List[int]] = None , __lowercase :Optional[List[int]] = None , __lowercase :int = 32 , __lowercase :int = 32 , __lowercase :int = 2 , __lowercase :int = 2 , __lowercase :int = 2 , __lowercase :int = 2 , __lowercase :bool = True , __lowercase :str = "gelu" , __lowercase :int = 64 , __lowercase :float = 0.1 , __lowercase :float = 0.1 , __lowercase :float = 0.1 , __lowercase :float = 0.1 , __lowercase :float = 0.1 , __lowercase :int = 100 , __lowercase :float = 0.02 , __lowercase :int=True , **__lowercase :Optional[Any] , ):
# time series specific configuration
__lowerCamelCase : Tuple =prediction_length
__lowerCamelCase : List[Any] =context_length or prediction_length
__lowerCamelCase : Dict =distribution_output
__lowerCamelCase : str =loss
__lowerCamelCase : Tuple =input_size
__lowerCamelCase : int =num_time_features
__lowerCamelCase : int =lags_sequence
__lowerCamelCase : Optional[int] =scaling
__lowerCamelCase : str =num_dynamic_real_features
__lowerCamelCase : Optional[Any] =num_static_real_features
__lowerCamelCase : List[Any] =num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__lowerCamelCase : Optional[int] =cardinality
else:
__lowerCamelCase : str =[0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__lowerCamelCase : int =embedding_dimension
else:
__lowerCamelCase : Optional[int] =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCamelCase : List[str] =num_parallel_samples
# Transformer architecture configuration
__lowerCamelCase : str =input_size * len(__lowercase ) + self._number_of_features
__lowerCamelCase : Union[str, Any] =d_model
__lowerCamelCase : int =encoder_attention_heads
__lowerCamelCase : int =decoder_attention_heads
__lowerCamelCase : Dict =encoder_ffn_dim
__lowerCamelCase : int =decoder_ffn_dim
__lowerCamelCase : List[Any] =encoder_layers
__lowerCamelCase : int =decoder_layers
__lowerCamelCase : Union[str, Any] =dropout
__lowerCamelCase : Optional[Any] =attention_dropout
__lowerCamelCase : List[str] =activation_dropout
__lowerCamelCase : List[str] =encoder_layerdrop
__lowerCamelCase : int =decoder_layerdrop
__lowerCamelCase : Tuple =activation_function
__lowerCamelCase : str =init_std
__lowerCamelCase : Dict =use_cache
super().__init__(is_encoder_decoder=__lowercase , **__lowercase )
@property
def __lowercase ( self :int ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
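# --- Added worked example of the feature bookkeeping above (parameter values
# are mine; the class corresponds to transformers' TimeSeriesTransformerConfig):
from transformers import TimeSeriesTransformerConfig

_cfg = TimeSeriesTransformerConfig(
    prediction_length=24,
    num_time_features=2,
    num_static_real_features=1,
    num_static_categorical_features=2,
    cardinality=[10, 20],  # default embedding_dimension -> [5, 10]
)
assert _cfg._number_of_features == (5 + 10) + 0 + 2 + 1 + 1 * 2  # == 20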
| 363 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def a ( lowerCamelCase_="" ):
'''simple docstring'''
lowercase__ = tempfile.mkdtemp()
return os.path.join(lowercase__ , str(uuid.uuid4() ) + lowerCamelCase_ )
@require_soundfile
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = torch.rand(12, dtype=torch.floataa ) - 0.5
lowercase__ = AgentAudio(lowerCamelCase )
lowercase__ = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase, agent_type.to_raw(), atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase ) )
# Ensure that the file contains the same value as the original tensor
lowercase__ , lowercase__ = sf.read(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase, torch.tensor(lowerCamelCase ), atol=1E-4 ) )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = torch.rand(12, dtype=torch.floataa ) - 0.5
lowercase__ = get_new_path(suffix='''.wav''' )
sf.write(lowerCamelCase, lowerCamelCase, 16_000 )
lowercase__ = AgentAudio(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase, agent_type.to_raw(), atol=1E-4 ) )
self.assertEqual(agent_type.to_string(), lowerCamelCase )
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = torch.randint(0, 256, (64, 64, 3) )
lowercase__ = AgentImage(lowerCamelCase )
lowercase__ = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase, agent_type._tensor, atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase ) )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
lowercase__ = Image.open(lowerCamelCase )
lowercase__ = AgentImage(lowerCamelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
lowercase__ = Image.open(lowerCamelCase )
lowercase__ = AgentImage(lowerCamelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase ) )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = '''Hey!'''
lowercase__ = AgentText(lowerCamelCase )
self.assertEqual(lowerCamelCase, agent_type.to_string() )
self.assertEqual(lowerCamelCase, agent_type.to_raw() )
self.assertEqual(lowerCamelCase, lowerCamelCase )
| 183 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = (IPNDMScheduler,)
lowercase__ = (("""num_inference_steps""", 50),)
def lowercase__ ( self : Union[str, Any], **lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = {'''num_train_timesteps''': 1_000}
config.update(**lowerCamelCase )
return config
def lowercase__ ( self : Any, lowerCamelCase : Any=0, **lowerCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop('''num_inference_steps''', lowerCamelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**lowerCamelCase )
lowercase__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
lowercase__ = scheduler_class.from_pretrained(lowerCamelCase )
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
lowercase__ = new_scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
lowercase__ = new_scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowercase__ ( self : str ):
'''simple docstring'''
pass
def lowercase__ ( self : Dict, lowerCamelCase : Optional[int]=0, **lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop('''num_inference_steps''', lowerCamelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
lowercase__ = scheduler_class.from_pretrained(lowerCamelCase )
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals (must be done after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
lowercase__ = new_scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
lowercase__ = new_scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowercase__ ( self : int, **lowerCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCamelCase )
lowercase__ = scheduler_class(**lowerCamelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(lowerCamelCase, lowerCamelCase )
lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(lowerCamelCase, lowerCamelCase )
lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase ).prev_sample
return sample
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop('''num_inference_steps''', lowerCamelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase, '''set_timesteps''' ):
scheduler.set_timesteps(lowerCamelCase )
elif num_inference_steps is not None and not hasattr(lowerCamelCase, '''set_timesteps''' ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
lowercase__ = scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def lowercase__ ( self : Dict ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase, time_step=lowerCamelCase )
def lowercase__ ( self : str ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase, time_step=lowerCamelCase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 183 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCamelCase ( lowerCamelCase : Dict):
A_ , A_ : List[str] = image.size
A_ , A_ : Optional[int] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
A_ : Optional[Any] = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""])
A_ : Dict = np.array(lowerCamelCase).astype(np.floataa) / 255.0
A_ : List[str] = image[None].transpose(0 , 3 , 1 , 2)
A_ : int = torch.from_numpy(lowerCamelCase)
return 2.0 * image - 1.0
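# --- Added note: `preprocess` maps a PIL image to a [-1, 1] float tensor of
# shape (1, 3, H, W) with H and W floored to multiples of 32. For example
# (my numbers), a 65x33 RGB input becomes a tensor of shape (1, 3, 32, 64).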
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] ,_a : VQModel ,_a : UNetaDModel ,_a : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] ,):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=_a ,unet=_a ,scheduler=_a )
@torch.no_grad()
def __call__( self : Any ,_a : Union[torch.Tensor, PIL.Image.Image] = None ,_a : Optional[int] = 1 ,_a : Optional[int] = 100 ,_a : Optional[float] = 0.0 ,_a : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,):
'''simple docstring'''
if isinstance(_a ,PIL.Image.Image ):
A_ : Any = 1
elif isinstance(_a ,torch.Tensor ):
A_ : Optional[int] = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}' )
if isinstance(_a ,PIL.Image.Image ):
A_ : str = preprocess(_a )
A_ , A_ : Any = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
A_ : Dict = (batch_size, self.unet.config.in_channels // 2, height, width)
A_ : Optional[int] = next(self.unet.parameters() ).dtype
A_ : int = randn_tensor(_a ,generator=_a ,device=self.device ,dtype=_a )
A_ : List[Any] = image.to(device=self.device ,dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a ,device=self.device )
A_ : Optional[int] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
A_ : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ : List[Any] = {}
if accepts_eta:
A_ : Union[str, Any] = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
A_ : List[str] = torch.cat([latents, image] ,dim=1 )
A_ : Optional[Any] = self.scheduler.scale_model_input(_a ,_a )
# predict the noise residual
A_ : Union[str, Any] = self.unet(_a ,_a ).sample
# compute the previous noisy sample x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(_a ,_a ,_a ,**_a ).prev_sample
# decode the image latents with the VQVAE
A_ : Any = self.vqvae.decode(_a ).sample
A_ : Tuple = torch.clamp(_a ,-1.0 ,1.0 )
A_ : int = image / 2 + 0.5
A_ : Any = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
A_ : List[Any] = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
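# --- Added usage sketch (hedged): this class mirrors diffusers'
# LDMSuperResolutionPipeline; the checkpoint id below is my assumption, not
# taken from the source.
#
#     import PIL.Image
#     pipe = LDMSuperResolutionPipeline.from_pretrained(
#         "CompVis/ldm-super-resolution-4x-openimages"
#     )
#     low_res = PIL.Image.open("input.png").convert("RGB")
#     upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#     upscaled.save("upscaled.png")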
| 27 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
# out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
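# --- Added note: the slow test above exercises the two-stage Kandinsky 2.2
# flow; in outline (hedged, argument names as used in the test):
#
#     image_emb, neg_emb = pipe_prior(prompt, ...).to_tuple()  # text -> image embeddings
#     image = pipeline(image_embeds=image_emb, negative_image_embeds=neg_emb,
#                      hint=hint, ...).images[0]               # denoise with depth hint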
| 27 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Dict ,A : Optional[Any] ,):
__A = parent
__A = 13
__A = 7
__A = 30
__A = self.seq_length + self.mem_len
__A = 15
__A = True
__A = True
__A = 99
__A = [10, 50, 80]
__A = 32
__A = 32
__A = 4
__A = 8
__A = 1_28
__A = 2
__A = 2
__A = None
__A = 1
__A = 0
__A = 3
__A = self.vocab_size - 1
__A = 0.01
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__A = TransfoXLConfig(
vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
return (config, input_ids_a, input_ids_a, lm_labels)
def UpperCamelCase_ ( self : Any ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def UpperCamelCase_ ( self : str ,A : List[Any] ,A : str ,A : Optional[Any] ,A : Union[str, Any] ):
__A = TFTransfoXLModel(A )
__A , __A = model(A ).to_tuple()
__A = {"input_ids": input_ids_a, "mems": mems_a}
__A , __A = model(A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def UpperCamelCase_ ( self : Optional[Any] ,A : Dict ,A : Any ,A : Any ,A : Optional[Any] ):
__A = TFTransfoXLLMHeadModel(A )
__A , __A = model(A ).to_tuple()
__A = {"input_ids": input_ids_a, "labels": lm_labels}
__A , __A = model(A ).to_tuple()
__A , __A = model([input_ids_a, mems_a] ).to_tuple()
__A = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
__A , __A = model(A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def UpperCamelCase_ ( self : Optional[Any] ,A : Tuple ,A : str ,A : Union[str, Any] ,A : Optional[int] ):
__A = TFTransfoXLForSequenceClassification(A )
__A = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : int ):
__A = self.prepare_config_and_inputs()
((__A) , (__A) , (__A) , (__A)) = config_and_inputs
__A = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
snake_case_ = () if is_tf_available() else ()
snake_case_ = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def UpperCamelCase_ ( self : List[str] ,A : str ,A : Optional[Any] ,A : Any ,A : Optional[Any] ,A : List[Any] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def UpperCamelCase_ ( self : Optional[Any] ):
__A = TFTransfoXLModelTester(self )
__A = ConfigTester(self ,config_class=A ,d_embed=37 )
def UpperCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Tuple ):
self.model_tester.set_seed()
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A )
def UpperCamelCase_ ( self : str ):
self.model_tester.set_seed()
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A )
def UpperCamelCase_ ( self : int ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A )
def UpperCamelCase_ ( self : Tuple ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
__A = model_class(A )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
__A = model.get_output_embeddings()
assert isinstance(A ,tf.keras.layers.Layer )
__A = model.get_bias()
assert name is None
else:
__A = model.get_output_embeddings()
assert x is None
__A = model.get_bias()
assert name is None
def UpperCamelCase_ ( self : Union[str, Any] ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def UpperCamelCase_ ( self : List[Any] ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = TFTransfoXLModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def UpperCamelCase_ ( self : List[Any] ):
__A = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
__A = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
__A = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
__A = model.generate(A ,max_length=2_00 ,do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() ,A )
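# --- Added note: the model tests above check TransfoXL's segment-level
# recurrence: each forward pass returns `mems` that are fed back in with the
# next segment. In outline (hedged, not from the source):
#
#     hidden, mems = model(segment_1).to_tuple()
#     hidden, mems = model({"input_ids": segment_2, "mems": mems}).to_tuple()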
| 55 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__snake_case : List[Any] = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Optional[int] = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : int = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Dict = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : Tuple = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : str = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def lowerCamelCase__ ( A_ ):
if isinstance(A_ , A_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=False ):
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=None ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
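# --- Added illustration (mine, not from the source): the squeezes above map
# 1x1-convolution attention weights of shape (out, in, 1, 1) onto linear
# weights of shape (out, in), which is the layout the diffusers attention
# blocks expect.
import torch

_conv_w = torch.randn(8, 4, 1, 1)
_linear_w = _conv_w.squeeze(-1).squeeze(-1)
assert _linear_w.shape == (8, 4)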
def lowerCamelCase__ ( A_ , A_ ):
UpperCAmelCase_ = torch.load(A_ , map_location="cpu" )
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint["time_embed.0.weight"]
UpperCAmelCase_ = checkpoint["time_embed.0.bias"]
UpperCAmelCase_ = checkpoint["time_embed.2.weight"]
UpperCAmelCase_ = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase_ = checkpoint["label_emb.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase_ = unet_config["down_block_types"]
UpperCAmelCase_ = unet_config["layers_per_block"]
UpperCAmelCase_ = unet_config["attention_head_dim"]
UpperCAmelCase_ = unet_config["block_out_channels"]
UpperCAmelCase_ = 1
UpperCAmelCase_ = channels_list[0]
for i, layer_type in enumerate(A_ ):
UpperCAmelCase_ = channels_list[i]
UpperCAmelCase_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
current_layer += 1
UpperCAmelCase_ = current_channels
# hardcoded the mid-block for now
UpperCAmelCase_ = "mid_block.resnets.0"
UpperCAmelCase_ = "middle_block.0"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.attentions.0"
UpperCAmelCase_ = "middle_block.1"
UpperCAmelCase_ = convert_attention(A_ , A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.resnets.1"
UpperCAmelCase_ = "middle_block.2"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = unet_config["up_block_types"]
for i, layer_type in enumerate(A_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = checkpoint["out.0.weight"]
UpperCAmelCase_ = checkpoint["out.0.bias"]
UpperCAmelCase_ = checkpoint["out.2.weight"]
UpperCAmelCase_ = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__snake_case : List[str] = parser.parse_args()
__snake_case : Any = strabool(args.class_cond)
__snake_case : List[str] = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case : Optional[Any] = None
__snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
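# Hedged usage sketch (added): reloading and sampling the converted pipeline.
# This follows diffusers' ConsistencyModelPipeline API; the one-step settings
# below are illustrative assumptions, not values taken from this script.
#
# from diffusers import ConsistencyModelPipeline
#
# pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
# image = pipe(num_inference_steps=1).images[0]  # one-step (consistency) sampling
# image.save("cm_sample.png")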
| 660 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
__lowerCamelCase = IFInpaintingSuperResolutionPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def UpperCAmelCase ( self :Dict , _lowercase :Dict , _lowercase :Any=0 ):
'''simple docstring'''
if str(_lowercase ).startswith("mps" ):
lowercase__ = torch.manual_seed(_lowercase )
else:
lowercase__ = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
lowercase__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_lowercase ) ).to(_lowercase )
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
lowercase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 611 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            """Stand-in so references to Image below don't fail when vision deps are missing."""
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
__lowerCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCAmelCase ( self :Dict , _lowercase :str , _lowercase :Union[str, Any] , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase ( self :List[Any] , _lowercase :Optional[int] , _lowercase :str ):
'''simple docstring'''
lowercase__ = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , _lowercase )
import datasets
lowercase__ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
lowercase__ = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def UpperCAmelCase ( self :str ):
'''simple docstring'''
pass
@slow
@require_torch
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = "Intel/dpt-large"
lowercase__ = pipeline("depth-estimation" , model=_lowercase )
lowercase__ = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
lowercase__ = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
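# Hedged usage sketch (added): the checkpoint exercised by the slow test above
# can be driven outside the test harness. The output keys mirror the assertions
# in this file; the saved file name is an assumption.
#
# from transformers import pipeline
#
# estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# outputs = estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# outputs["depth"].save("depth.png")  # PIL image; "predicted_depth" is the raw tensor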
| 611 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__A : Dict = logging.get_logger(__name__)
__A : Union[str, Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__A : Any = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int ):
'''simple docstring'''
for attribute in key.split(""".""" ):
snake_case_ : List[str] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
snake_case_ : List[str] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
snake_case_ : List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case_ : Any = value
elif weight_type == "weight_g":
snake_case_ : Optional[int] = value
elif weight_type == "weight_v":
snake_case_ : Dict = value
elif weight_type == "bias":
snake_case_ : Optional[Any] = value
else:
snake_case_ : List[Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = []
snake_case_ : Dict = fairseq_model.state_dict()
snake_case_ : str = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case_ : Union[str, Any] = None
for name, value in fairseq_dict.items():
snake_case_ : str = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ : Dict = True
elif name.split(""".""" )[0] == "proj":
snake_case_ : Dict = fairseq_model.proj
snake_case_ : int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ : Dict = True
if "*" in mapped_key:
snake_case_ : Optional[Any] = name.split(lowerCAmelCase__ )[0].split(""".""" )[-2]
snake_case_ : Optional[int] = mapped_key.replace("""*""" , lowerCAmelCase__ )
if "weight_g" in name:
snake_case_ : Dict = """weight_g"""
elif "weight_v" in name:
snake_case_ : Any = """weight_v"""
elif "bias" in name:
snake_case_ : List[Any] = """bias"""
elif "weight" in name:
snake_case_ : Dict = """weight"""
else:
snake_case_ : Any = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[str] ):
'''simple docstring'''
snake_case_ : List[str] = full_name.split("""conv_layers.""" )[-1]
snake_case_ : Dict = name.split(""".""" )
snake_case_ : int = int(items[0] )
snake_case_ : int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case_ : Optional[int] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case_ : str = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case_ : Tuple = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case_ : Optional[int] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase__ )
def UpperCAmelCase ( lowerCamelCase_ :List[str] ):
'''simple docstring'''
snake_case_ , snake_case_ : Union[str, Any] = emb.weight.shape
snake_case_ : Tuple = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
snake_case_ : Tuple = emb.weight.data
return lin_layer
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as f:
snake_case_ : List[str] = f.readlines()
snake_case_ : str = [line.split(""" """ )[0] for line in lines]
snake_case_ : Dict = len(lowerCAmelCase__ )
snake_case_ : int = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(lowerCAmelCase__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Any , ):
'''simple docstring'''
snake_case_ : List[str] = WavaVecaConfig.from_pretrained(lowerCAmelCase__ )
snake_case_ : Dict = SpeechaTextaConfig.from_pretrained(
lowerCAmelCase__ , vocab_size=lowerCAmelCase__ , decoder_layers=lowerCAmelCase__ , do_stable_layer_norm=lowerCAmelCase__ )
snake_case_ : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
snake_case_ : int = model[0].eval()
# set weights for wav2vec2 encoder
snake_case_ : Dict = WavaVecaModel(lowerCAmelCase__ )
snake_case_ : Dict = recursively_load_weights_wavaveca(model.encoder , lowerCAmelCase__ )
snake_case_ : str = SpeechaTextaForCausalLM(lowerCAmelCase__ )
snake_case_ , snake_case_ : int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCAmelCase__ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
snake_case_ : Optional[int] = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
snake_case_ : int = SpeechEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
snake_case_ : str = False
# add projection layer
snake_case_ : Dict = nn.Parameter(projection_layer.weight )
snake_case_ : Tuple = nn.Parameter(projection_layer.bias )
snake_case_ : Optional[int] = create_vocab_dict(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , """vocab.json""" ) , """w""" ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Union[str, Any] = SpeechaTextaTokenizer(os.path.join(lowerCAmelCase__ , """vocab.json""" ) )
tokenizer.save_pretrained(lowerCAmelCase__ )
snake_case_ : List[str] = hf_wavavec.config.to_dict()
snake_case_ : List[Any] = tokenizer.pad_token_id
snake_case_ : Any = tokenizer.bos_token_id
snake_case_ : Any = tokenizer.eos_token_id
snake_case_ : List[str] = """speech_to_text_2"""
snake_case_ : Any = """wav2vec2"""
snake_case_ : Dict = SpeechEncoderDecoderConfig.from_dict(lowerCAmelCase__ )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
feature_extractor.save_pretrained(lowerCAmelCase__ )
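# Hedged usage sketch (added): a checkpoint saved by the function above can
# typically be reloaded like this. The processor classes are assumptions based
# on the components the script saves; the audio tensor is random placeholder data.
#
# import torch
# from transformers import SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor, Speech2Text2Tokenizer
#
# model = SpeechEncoderDecoderModel.from_pretrained(pytorch_dump_folder_path)
# extractor = Wav2Vec2FeatureExtractor.from_pretrained(pytorch_dump_folder_path)
# tok = Speech2Text2Tokenizer.from_pretrained(pytorch_dump_folder_path)
# inputs = extractor(torch.randn(16000).numpy(), sampling_rate=16000, return_tensors="pt")
# print(tok.batch_decode(model.generate(inputs.input_values), skip_special_tokens=True))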
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=10_224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__A : Optional[int] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 334 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
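# Example of one rendered markdown line (illustrative; real titles and URLs vary per run):
# * [Show HN: An example story](https://news.ycombinator.com/item?id=1)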
| 695 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# fmt: off
UpperCAmelCase__ : Union[str, Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
UpperCAmelCase__ : List[str] = dict(zip(A ,range(len(A ) ) ) )
UpperCAmelCase__ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
def __lowercase ( self : Tuple ,**A : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**A )
def __lowercase ( self : str ,A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = """tester"""
UpperCAmelCase__ : Optional[Any] = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def __lowercase ( self : int ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.get_tokenizers(do_lower_case=A )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase__ : List[str] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode([special_token] ,add_special_tokens=A )
self.assertEqual(len(A ) ,1 )
UpperCAmelCase__ : int = tokenizer.decode(A ,skip_special_tokens=A )
self.assertTrue(special_token not in decoded )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase__ : int = self.get_input_output_texts(A )
UpperCAmelCase__ : Any = tokenizer.tokenize(A )
UpperCAmelCase__ : int = tokenizer.convert_tokens_to_ids(A )
UpperCAmelCase__ : Dict = tokenizer.encode(A ,add_special_tokens=A )
self.assertListEqual(A ,A )
UpperCAmelCase__ : int = tokenizer.convert_ids_to_tokens(A )
self.assertNotEqual(len(A ) ,0 )
UpperCAmelCase__ : Optional[Any] = tokenizer.decode(A )
self.assertIsInstance(A ,A )
self.assertEqual(text_a.replace(""" """ ,"""""" ) ,A )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
| 703 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a given replacement pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version pinned in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the library __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
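# Hedged usage sketch (added): typical invocations, assuming the file lives at
# utils/release.py and is run from the repository root (the path is an assumption):
#   python utils/release.py                  # cut a minor release from the dev branch
#   python utils/release.py --patch          # cut a patch release
#   python utils/release.py --post_release   # bump back to a .dev0 version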
| 194 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swin-tiny-patch4-window7-224': (
        'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Swin Transformer; the defaults mirror microsoft/swin-tiny-patch4-window7-224."""

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
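# Hedged usage sketch (added): instantiating the configuration with the defaults
# above mirrors the microsoft/swin-tiny-patch4-window7-224 checkpoint.
#
# config = SwinConfig()
# assert config.hidden_size == 768  # 96 * 2 ** (len([2, 2, 6, 2]) - 1)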
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 34 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sort a[start:end + 1] in place and return the number of comparisons made."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partition a[start:end + 1] around a random pivot; return (pivot index, comparisons)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
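# Illustrative self-check (added; not in the original module): the sort mutates
# the list in place and returns how many comparisons the partition step made.
_demo = [3, 1, 2]
_demo_count = _in_place_quick_sort(_demo, 0, len(_demo) - 1)
assert _demo == [1, 2, 3]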
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 34 | 1 |
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
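    # Illustrative calls (added); the results follow from the recursion above:
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # [10, 22, 33, 41, 60, 80]
    print(longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9]))  # [1, 2, 3, 9]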
| 168 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class __snake_case( __A ):
def __lt__( self , A_ ):
'''simple docstring'''
return self[-1] < other[-1]
def __eq__( self , A_ ):
'''simple docstring'''
return self[-1] == other[-1]
def A__ ( UpperCamelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
# sort into stacks
for element in collection:
_SCREAMING_SNAKE_CASE = Stack([element] )
_SCREAMING_SNAKE_CASE = bisect_left(UpperCamelCase__ , UpperCamelCase__ )
if i != len(UpperCamelCase__ ):
stacks[i].append(UpperCamelCase__ )
else:
stacks.append(UpperCamelCase__ )
# use a heap-based merge to merge stack efficiently
_SCREAMING_SNAKE_CASE = merge(*(reversed(UpperCamelCase__ ) for stack in stacks) )
return collection
if __name__ == "__main__":
lowerCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase : Optional[Any] = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
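# Illustrative result (added): patience_sort([1, 9, 5, 21, 17, 6]) returns
# [1, 5, 6, 9, 17, 21]. Stacking uses bisect_left and merging uses heapq.merge,
# giving O(n log n) behaviour overall.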
| 168 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Folder paths and flip mode; fill these in before running.
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print('Processing...')
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cv2.imwrite(f'{file_root}.jpg', image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f'Success {index + 1}/{len(new_images)} with {file_name}')
        annos_list = []
        for anno in new_annos[index]:
            obj = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
            annos_list.append(obj)
        with open(f'{file_root}.txt', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and YOLO-format boxes from paired .txt/.jpg files."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and remap its bounding boxes accordingly."""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_image = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_image = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_image)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """Generate a random lowercase alphanumeric string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 42 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"camembert-base": 512,
}
UpperCAmelCase__ = "▁"
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES
UpperCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = ['input_ids', 'attention_mask']
UpperCamelCase_ : Any = CamembertTokenizer
def __init__( self : Dict , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : List[Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Optional[Any]="<unk>" , lowerCamelCase__ : Optional[Any]="<pad>" , lowerCamelCase__ : Optional[int]="<mask>" , lowerCamelCase__ : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] , **lowerCamelCase__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
__lowercase = vocab_file
__lowercase = False if not self.vocab_file else True
def UpperCAmelCase_ ( self : Optional[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
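# Hedged usage sketch (added): the class above corresponds to transformers'
# CamembertTokenizerFast; loading a hub checkpoint requires network access,
# so the example is left commented.
#
# tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
# enc = tokenizer("J'aime le camembert !")
# print(tokenizer.convert_ids_to_tokens(enc.input_ids))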
| 332 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
UpperCamelCase_ : List[Any] = logging.get_logger(__name__)
UpperCamelCase_ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase_ : Optional[int] = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
UpperCamelCase_ : Optional[int] = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
UpperCamelCase_ : Optional[Any] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = PRETRAINED_INIT_CONFIGURATION
snake_case = RoFormerTokenizer
def __init__( self : List[Any] , _snake_case : List[Any]=None , _snake_case : str=None , _snake_case : str=True , _snake_case : Optional[int]="[UNK]" , _snake_case : List[Any]="[SEP]" , _snake_case : List[Any]="[PAD]" , _snake_case : Tuple="[CLS]" , _snake_case : List[Any]="[MASK]" , _snake_case : Optional[Any]=True , _snake_case : List[Any]=None , **_snake_case : Any , ) -> str:
"""simple docstring"""
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase" , _snake_case ) != do_lower_case
or pre_tok_state.get("strip_accents" , _snake_case ) != strip_accents
):
A_ = getattr(_snake_case , pre_tok_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = pre_tok_class(**_snake_case )
A_ = do_lower_case
def __getstate__( self : Optional[int] ) -> str:
"""simple docstring"""
A_ = self.__dict__.copy()
A_ = BertPreTokenizer()
return state
def __setstate__( self : Optional[int] , _snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A_ = d
A_ = self.__dict__["_tokenizer"].get_vocab()
A_ = PreTokenizer.custom(JiebaPreTokenizer(_snake_case ) )
def lowerCamelCase__ ( self : Dict , _snake_case : Any , _snake_case : Optional[Any]=None ) -> int:
"""simple docstring"""
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__ ( self : int , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : List[str] , _snake_case : str , _snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
A_ = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
def lowerCamelCase__ ( self : str , _snake_case : Dict , _snake_case : Dict=None , _snake_case : int=None , _snake_case : Any=False , **_snake_case : int , ) -> str:
"""simple docstring"""
A_ = BertPreTokenizer()
return super().save_pretrained(_snake_case , _snake_case , _snake_case , _snake_case , **_snake_case )
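# Hedged usage sketch (added): the class above corresponds to transformers'
# RoFormerTokenizerFast, which swaps in a jieba-based pre-tokenizer for Chinese
# text. Loading a hub checkpoint requires network access, so it is left commented.
#
# tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# print(tokenizer.tokenize("今天天气非常好。"))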
| 713 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class __lowerCAmelCase ( unittest.TestCase , _lowercase ):
"""simple docstring"""
def lowerCamelCase__ ( self : str ) -> Dict:
"""simple docstring"""
A_ = load_tool("text-question-answering" )
self.tool.setup()
A_ = load_tool("text-question-answering" , remote=_snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
A_ = self.tool(_snake_case , "What did Hugging Face do in April 2021?" )
self.assertEqual(_snake_case , "launched the BigScience Research Workshop" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
A_ = self.remote_tool(_snake_case , "What did Hugging Face do in April 2021?" )
self.assertEqual(_snake_case , "launched the BigScience Research Workshop" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
A_ = self.tool(text=_snake_case , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_snake_case , "launched the BigScience Research Workshop" )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
A_ = self.remote_tool(text=_snake_case , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_snake_case , "launched the BigScience Research Workshop" )
| 482 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
_lowerCamelCase : str = Image.open(requests.get(A_, stream=A_ ).raw ).convert('''RGB''' )
return image
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( A_ : str, A_ : Optional[Any], A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = dct.pop(A_ )
_lowerCamelCase : Dict = val
def snake_case_ ( A_ : List[Any], A_ : List[Any] ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCamelCase : Tuple = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
_lowerCamelCase : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
_lowerCamelCase : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(A_, requires_grad=A_ ), v_bias) )
_lowerCamelCase : Any = qkv_bias
def snake_case_ ( A_ : int ):
'''simple docstring'''
_lowerCamelCase : Tuple = 3_64 if '''coco''' in model_name else 2_24
_lowerCamelCase : Optional[int] = InstructBlipVisionConfig(image_size=A_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_lowerCamelCase : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCamelCase : int = TaConfig.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
_lowerCamelCase : int = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''', vocab_size=3_20_01 ).to_dict()
elif "vicuna-13b" in model_name:
_lowerCamelCase : Dict = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''', vocab_size=3_20_01 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_lowerCamelCase : Any = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict()
_lowerCamelCase : List[Any] = InstructBlipConfig(vision_config=A_, text_config=A_, qformer_config=A_ )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained('''bert-base-uncased''', truncation_side='''left''')
    qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''})
    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''', truncation_side='''left''')
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            '''huggyllama/llama-7b''', truncation_side='''left''', bos_token='''</s>''', unk_token='''</s>''' )
        tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''})
    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()
    model_name_to_original = {
        '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
        '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
        '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
        '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
    }
    name, model_type = model_name_to_original[model_name]
    # load original model
    print('''Loading original model...''')
    hf_model_device = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
    lavis_device = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device )
    original_model.eval()
    print('''Done!''')
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('''Qformer.bert'''):
            key = key.replace('''Qformer.bert''', '''qformer''')
        if "attention.self" in key:
            key = key.replace('''self''', '''attention''')
        if "llm_proj" in key:
            key = key.replace('''llm_proj''', '''language_projection''')
        if "t5_proj" in key:
            key = key.replace('''t5_proj''', '''language_projection''')
        if key.startswith('''llm_model'''):
            key = key.replace('''llm_model''', '''language_model''')
        if key.startswith('''t5'''):
            key = key.replace('''t5''', '''language''')
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)
    image = load_demo_image()
    prompt = '''What is unusual about this image?'''
    # create processor
    # assumption: the upstream script normalizes with the OpenAI CLIP mean/std constants
    image_processor = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD )
    processor = InstructBlipProcessor(
        image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer, )
    inputs = processor(images=image, text=prompt, return_tensors='''pt''' ).to(hf_model_device)
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors['''eval'''](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ), pixel_values)
    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
            logits = hf_model(**inputs ).logits
        else:
            original_logits = original_model(
                {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
            label_input_ids = tokenizer('''\n''', return_tensors='''pt''' ).input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels ).logits
    print('''First values of original logits:''', original_logits[0, :3, :3])
    print('''First values of HF logits:''', logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if '''vicuna''' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device ), logits, atol=atol)
    print('''Looks ok!''')
    print('''Generating with original model...''')
    original_outputs = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt}, num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print('''Generating with HF model...''')
    outputs = hf_model.generate(
        **inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1, )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print('''Original generation:''', original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('''HF generation:''', output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(F'''Salesforce/{model_name}''')
        hf_model.push_to_hub(F'''Salesforce/{model_name}''')
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
lowerCAmelCase__ = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
        help='''Name of the InstructBLIP model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
lowerCAmelCase__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 83 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowercase__ = 128022
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class snake_case__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
snake_case : int = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
snake_case : int = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : Optional[int] = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
snake_case : Optional[int] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Dict , **UpperCamelCase__ : Optional[int] ) -> Any:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCAmelCase ( self : Tuple , UpperCamelCase__ : Tuple ) -> str:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case : Optional[int] = '''</s>'''
snake_case : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
snake_case : Tuple = self.get_tokenizer()
snake_case : Dict = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(UpperCamelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[Any] = self.get_tokenizer()
snake_case : Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
snake_case : Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''This is a test''' )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case : Tuple = {'''input_ids''': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = """facebook/m2m100_418M"""
    src_text = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
    tgt_text = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
    def setUpClass(cls ):
        cls.tokenizer : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self ):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 12_8063 )
    def test_get_vocab(self ):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['''<unk>'''] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('''en''' ) , vocab )
    def test_tokenizer_batch_encode_plus(self ):
        self.tokenizer.src_lang = '''en'''
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_tokenizer_decode_ignores_language_codes(self ):
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_tokenizer_save_load(self ):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
    def test_batch_fairseq_parity(self ):
        self.tokenizer.src_lang = '''en'''
        self.tokenizer.tgt_lang = '''fr'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
        batch['''decoder_input_ids'''] = shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self ):
        self.tokenizer.src_lang = '''mr'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer.src_lang = '''zh'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
    def test_tokenizer_target_mode(self ):
        self.tokenizer.tgt_lang = '''mr'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        self.tokenizer.tgt_lang = '''zh'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
    def test_tokenizer_translation(self ):
        inputs = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
'''input_ids''': [[12_8022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 12_8006,
} , )
| 638 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_A = logging.getLogger(__name__)
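# Build a pair of DataLoaders over a synthetic y = a * x + b regression dataset.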
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
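# Minimal training loop; it returns the random numbers drawn each epoch so the tests can
# check that RNG state is restored correctly across save/load round-trips.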
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
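# Tiny linear model (y = a * x + b) whose two scalar parameters make state comparisons trivial.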
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
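# The tests below exercise Accelerator.save_state / load_state round-trips, automatic
# checkpoint naming, retention limits, and registration error handling.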
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_with_save_limit(self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1 , project_dir=tmpdir , automatic_checkpoint_naming=True )
            # Train baseline
            accelerator = Accelerator(project_config=project_config )
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model , optimizer , train_dataloader , valid_dataloader )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _lowerCamelCase (self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ : Optional[int] = DummyModel()
lowercase_ : str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ : str = dummy_dataloaders()
# Train baseline
lowercase_ : Any = Accelerator()
lowercase_ : Tuple = accelerator.prepare(
_a , _a , _a , _a )
# Save initial
lowercase_ : str = os.path.join(_a , 'initial' )
accelerator.save_state(_a )
(lowercase_) : str = model.a.item(), model.b.item()
lowercase_ : Any = optimizer.state_dict()
lowercase_ : str = train(3 , _a , _a , _a , _a )
(lowercase_) : str = model.a.item(), model.b.item()
lowercase_ : Optional[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase_ : Optional[int] = DummyModel()
lowercase_ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ : Dict = dummy_dataloaders()
lowercase_ : Optional[int] = Accelerator()
lowercase_ : Optional[Any] = accelerator.prepare(
_a , _a , _a , _a )
accelerator.load_state(_a )
(lowercase_) : List[Any] = model.a.item(), model.b.item()
lowercase_ : str = optimizer.state_dict()
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
lowercase_ : Union[str, Any] = train(2 , _a , _a , _a , _a )
# Save everything
lowercase_ : int = os.path.join(_a , 'checkpoint' )
accelerator.save_state(_a )
# Load everything back in and make sure all states work
accelerator.load_state(_a )
test_rands += train(1 , _a , _a , _a , _a )
(lowercase_) : Any = model.a.item(), model.b.item()
lowercase_ : str = optimizer.state_dict()
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
def _lowerCamelCase (self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ : str = DummyModel()
lowercase_ : str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ : int = dummy_dataloaders()
lowercase_ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=_a )
# Train baseline
lowercase_ : List[str] = Accelerator(project_dir=_a , project_config=_a )
lowercase_ : List[Any] = accelerator.prepare(
_a , _a , _a , _a )
# Save initial
accelerator.save_state()
(lowercase_) : str = model.a.item(), model.b.item()
lowercase_ : Optional[int] = optimizer.state_dict()
lowercase_ : Any = train(3 , _a , _a , _a , _a )
(lowercase_) : Dict = model.a.item(), model.b.item()
lowercase_ : List[str] = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase_ : Tuple = DummyModel()
lowercase_ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ : Union[str, Any] = dummy_dataloaders()
lowercase_ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_a )
lowercase_ : Optional[Any] = Accelerator(project_dir=_a , project_config=_a )
lowercase_ : List[Any] = accelerator.prepare(
_a , _a , _a , _a )
accelerator.load_state(os.path.join(_a , 'checkpoints' , 'checkpoint_0' ) )
(lowercase_) : Optional[int] = model.a.item(), model.b.item()
lowercase_ : Dict = optimizer.state_dict()
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
lowercase_ : List[Any] = train(2 , _a , _a , _a , _a )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_a , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _a , _a , _a , _a )
(lowercase_) : List[str] = model.a.item(), model.b.item()
lowercase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
self.assertEqual(_a , _a )
def _lowerCamelCase (self ) -> Optional[int]:
        t = torch.tensor([1, 2, 3] )
        t1 = torch.tensor([2, 3, 4] )
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters() )
        accelerator = Accelerator()
        with self.assertRaises(ValueError ) as ve:
            accelerator.register_for_checkpointing(t , t1 , net , opt )
        message = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def _lowerCamelCase (self ) -> Dict:
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer , step_size=1 , gamma=0.99 )
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True )
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir , project_config=project_config )
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model , optimizer , train_dataloader , valid_dataloader , scheduler )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3 , model , train_dataloader , optimizer , accelerator , scheduler )
            self.assertNotEqual(scheduler_state , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir , 'checkpoints' , 'checkpoint_0' ) )
            self.assertEqual(scheduler_state , scheduler.state_dict() )
def _lowerCamelCase (self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True , total_limit=2 )
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir , project_config=project_config )
            model = accelerator.prepare(model )
            # Save 11 states; only the 2 most recent checkpoints should be retained:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir , 'checkpoints' , 'checkpoint_0' ) ) )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , 'checkpoints' , 'checkpoint_9' ) ) )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def _lowerCamelCase (self ) -> Optional[Any]:
lowercase_ : Tuple = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 707 | '''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
"""simple docstring"""
    def __init__(self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone(self , config , pixel_values , labels ):
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values, 'labels': labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
        return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
    def test_inputs_embeds(self ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
    def test_model_common_attributes(self ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
    def test_feed_forward_chunking(self ):
pass
    def test_training(self ):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing(self ):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self ):
        return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self ):
        model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 438 | 0 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
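# Tester that builds small random configs/inputs for the GPT-NeoX-Japanese model tests below.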
class GPTNeoXJapaneseModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self ):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self , config , input_ids , input_mask ):
        model = GPTNeoXJapaneseModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(self , config , input_ids , input_mask ):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self , config , input_ids , input_mask , token_labels ):
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs(self , config , input_ids , input_mask ):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self ):
        self.model_tester = GPTNeoXJapaneseModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXJapaneseConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )
    def test_model_as_decoder(self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_model_as_decoder_with_default_input_mask(self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_decoder_model_past_large_inputs(self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )
    def test_model_for_causal_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
    def test_generation(self ):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id )
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id )
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt , return_tensors="pt" ).input_ids
            generated_ids = model.generate(input_ids , max_length=50 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
| 153 |
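# Project Euler problem 16: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26; find the digit sum of 2**power.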
def solution(power: int = 1000) -> int:
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
print('Sum of the digits is: ', result)
| 592 | 0 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works.
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 491 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
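# Example: run a Stable Diffusion pipeline on CPU in bfloat16, optimized with
# Intel Extension for PyTorch (IPEX).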
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 491 | 1 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 221 | import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def _UpperCAmelCase ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@slow
@require_torch
def _UpperCAmelCase ( self: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = "Intel/dpt-large"
__UpperCAmelCase = pipeline("depth-estimation" , model=__lowerCAmelCase )
__UpperCAmelCase = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
__UpperCAmelCase = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 221 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"
    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
    def atol_for_validation(self) -> float:
"""simple docstring"""
return 1e-5
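
# --- Illustrative usage (a sketch, not part of the original file) ---
# width_coefficient/depth_coefficient are the compound-scaling knobs; the
# defaults above correspond to the EfficientNet-B7 variant with 600px inputs.
if __name__ == "__main__":
    config = EfficientNetConfig()
    assert config.image_size == 600
    assert config.num_hidden_layers == sum(config.num_block_repeats) * 4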
| 635 | """simple docstring"""
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
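
# --- Illustrative usage (an added sketch; the numbers are made up) ---
# Exactly one of the three concentrations is passed as 0 and recovered from
# the mass-action law n * p = n_i**2.
if __name__ == "__main__":
    # p = n_i**2 / n = 10**2 / 25 = 4.0
    print(carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10))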
| 635 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
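
# --- Illustrative note (an addition, not in the original script) ---
# join_uneven_inputs wraps torch.distributed.algorithms.Join: ranks whose
# dataloader shard runs out early keep shadowing the collective calls issued
# by the ranks that still have batches, so nobody deadlocks. The pattern is:
#
#   with accelerator.join_uneven_inputs([ddp_model]):
#       for batch in uneven_dataloader:
#           ...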
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
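
# --- Illustrative invocation (an assumption; the script filename is made up) ---
# This module asserts num_processes == 2, so it is meant to be launched as:
#   accelerate launch --num_processes=2 test_distributed_data_loop.py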
| 367 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy()) | 6 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=512 + 1,  # 512 colour clusters + 1 start-of-sequence token
        n_positions=32 * 32,  # number of tokens in a flattened 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
return inputs
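

# --- Illustrative usage (a sketch, not part of the original file) ---
# ImageGPT flattens a 32x32 image into 1024 colour-cluster tokens, so
# n_positions defaults to 32 * 32 and the vocabulary is 512 clusters plus one
# start-of-sequence token.
if __name__ == "__main__":
    config = ImageGPTConfig()
    assert config.n_positions == 32 * 32
    assert config.vocab_size == 512 + 1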
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 0 |
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
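
    # --- Illustrative check (an addition, not in the original module) ---
    # For 2 bits the reflected Gray code flips exactly one bit per step:
    # 00 -> 01 -> 11 -> 10, i.e. [0, 1, 3, 2].
    assert gray_code(2) == [0, 1, 3, 2]
    assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]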
| 263 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 263 | 1 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
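
    # --- Illustrative extra check (an addition, not in the original module) ---
    # bisection only needs a sign change on [a, b]; sqrt(2) is the root of
    # x**2 - 2 on the bracket [1, 2].
    print(bisection(lambda x: x**2 - 2, 1, 2))  # ~1.4142135...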
| 710 | """simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
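
    # --- Illustrative sanity check (an addition, not in the original script) ---
    # Consecutive waypoints on the returned path must differ by one grid step.
    for a, b in zip(path, path[1:]):
        assert abs(a[0] - b[0]) + abs(a[1] - b[1]) == 1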
| 237 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns it contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
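

# --- Illustrative usage (a sketch, not part of the original module) ---
# Passing None falls back to DEFAULT_PROMPTS_REPO; a template string (anything
# containing whitespace) is returned unchanged; otherwise the value is treated
# as a dataset repo id and the prompt file is downloaded from the Hub.
# template = download_prompt(None, agent_name="demo-agent", mode="run")
# same = download_prompt("Answer: <<task>>", agent_name="demo-agent")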
| 84 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass | 46 | 0 |
import qiskit
def quantum_entanglement(qubits: int = 2):
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
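

# --- Illustrative check (an addition, not in the original script) ---
# The circuit prepares the GHZ state (|0...0> + |1...1>)/sqrt(2), so only the
# two all-equal bitstrings should show up among the measured counts.
def _check_ghz_counts(qubits: int = 3) -> None:
    counts = quantum_entanglement(qubits)
    assert set(counts) <= {"0" * qubits, "1" * qubits}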
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}') | 641 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 641 | 1 |
def all_unique_chars(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
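
    # --- Illustrative usage (an addition, not in the original module) ---
    # One bit per code point is kept in an arbitrary-precision int, so a
    # repeated character is detected in O(1) per character.
    assert all_unique_chars("abcdef")
    assert not all_unique_chars("hello")  # 'l' repeats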
| 551 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
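
# Example invocation (the script file name and all paths below are placeholders,
# not taken from the repository; the flags are the ones defined above):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --dump_path ./stable-diffusion-v1-5 \
#       --extract_ema --half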
| 551 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 290 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
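
# The metric above is a thin wrapper around nltk; the same score can be computed
# directly (illustrative; an exact-match hypothesis scores a GLEU of 1.0):
#   from nltk.translate import gleu_score
#   gleu_score.corpus_gleu(list_of_references=[[["the", "cat"]]], hypotheses=[["the", "cat"]])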
| 290 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: Optional[str], src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 6 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(F'''Dump to {args.token_counts_dump}''')
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 6 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
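
# Minimal usage sketch ("dandelin/vilt-b32-finetuned-vqa" is an existing hub
# checkpoint; `image` is assumed to be a PIL image loaded by the caller):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(image, "How many cats are there?", return_tensors="pt")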
| 109 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
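
# Sketch: these fields are normally populated from the command line via
# HfArgumentParser (part of transformers):
#   from transformers import HfArgumentParser
#   benchmark_args = HfArgumentParser(BenchmarkArguments).parse_args_into_dataclasses()[0]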
| 109 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
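
# Usage sketch (Data2VecTextModel is the matching model class in transformers):
#   configuration = Data2VecTextConfig()
#   model = Data2VecTextModel(configuration)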
| 58 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')

mock_tokenizer_config = {'target_lang': 'fi', 'source_lang': 'en'}
zh_code = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'

if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowercase = {"""input_ids""": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase, model_name="Helsinki-NLP/opus-mt-en-de", revision="1a8c2263da11e68e50938f97e10cd57820bd504c", decode_kwargs={"use_source_tokenizer": True})
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 566 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
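
# Usage sketch:
#   config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
#   assert config.to_dict()["model_type"] == "altclip"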
| 712 |
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    # sin(theta) = sum_{r=0}^{accuracy-1} (-1)^r * theta^(2r+1) / (2r+1)!
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')

    theta = float(theta)
    # reduce theta into [-2*pi, 2*pi] so few terms of the series are needed
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    # cos(theta) = sum_{r=0}^{accuracy-1} (-1)^r * theta^(2r) / (2r)!
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')

    theta = float(theta)
    # reduce theta into [-2*pi, 2*pi] so few terms of the series are needed
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
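
# Quick sanity check against the standard library (illustrative):
#   import math
#   assert abs(maclaurin_sin(1.0) - math.sin(1.0)) < 1e-9
#   assert abs(maclaurin_cos(1.0) - math.cos(1.0)) < 1e-9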
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 509 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict: qkv bias is [q_bias, zeros_like(k), v_bias]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( A_ , A_=None , A_=False ):
lowerCAmelCase__ : Optional[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
lowerCAmelCase__ : str = tokenizer('''\n''' , add_special_tokens=A_ ).input_ids[0]
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = get_blipa_config(A_ , eos_token_id=A_ )
lowerCAmelCase__ : List[Any] = BlipaForConditionalGeneration(A_ ).eval()
lowerCAmelCase__ : str = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
lowerCAmelCase__ ,lowerCAmelCase__ : Any = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
lowerCAmelCase__ : Union[str, Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = load_model_and_preprocess(
name=A_ , model_type=A_ , is_eval=A_ , device=A_ )
original_model.eval()
print('''Done!''' )
# update state dict keys
lowerCAmelCase__ : Optional[Any] = original_model.state_dict()
lowerCAmelCase__ : List[Any] = create_rename_keys(A_ )
for src, dest in rename_keys:
rename_key(A_ , A_ , A_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCAmelCase__ : Any = state_dict.pop(A_ )
if key.startswith('''Qformer.bert''' ):
lowerCAmelCase__ : int = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
lowerCAmelCase__ : str = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
lowerCAmelCase__ : List[Any] = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
lowerCAmelCase__ : List[Any] = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
lowerCAmelCase__ : List[Any] = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
lowerCAmelCase__ : Any = key.replace('''t5''' , '''language''' )
lowerCAmelCase__ : Optional[int] = val
# read in qv biases
read_in_q_v_bias(A_ , A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Any = hf_model.load_state_dict(A_ , strict=A_ )
assert len(A_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowerCAmelCase__ : Dict = load_demo_image()
lowerCAmelCase__ : Optional[Any] = vis_processors['''eval'''](A_ ).unsqueeze(0 ).to(A_ )
lowerCAmelCase__ : List[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(A_ )
# create processor
lowerCAmelCase__ : Optional[Any] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=A_ , image_std=A_ )
lowerCAmelCase__ : List[str] = BlipaProcessor(image_processor=A_ , tokenizer=A_ )
lowerCAmelCase__ : Any = processor(images=A_ , return_tensors='''pt''' ).pixel_values.to(A_ )
    # make sure the processor creates exactly the same pixel values
assert torch.allclose(A_ , A_ )
original_model.to(A_ )
hf_model.to(A_ )
with torch.no_grad():
if "opt" in model_name:
lowerCAmelCase__ : Any = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
lowerCAmelCase__ : List[Any] = hf_model(A_ , A_ ).logits
else:
lowerCAmelCase__ : Dict = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
lowerCAmelCase__ : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
lowerCAmelCase__ : Dict = hf_model(A_ , A_ , labels=A_ ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowerCAmelCase__ : Optional[Any] = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=A_ )
assert torch.allclose(logits[0, :3, :3] , A_ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowerCAmelCase__ : Dict = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=A_ )
else:
# cast to same type
lowerCAmelCase__ : List[str] = logits.dtype
assert torch.allclose(original_logits.to(A_ ) , A_ , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
lowerCAmelCase__ : Union[str, Any] = ''''''
lowerCAmelCase__ : Union[str, Any] = tokenizer(A_ , return_tensors='''pt''' ).input_ids.to(A_ )
lowerCAmelCase__ : Any = original_model.generate({'''image''': original_pixel_values} )
lowerCAmelCase__ : Optional[int] = hf_model.generate(
A_ , A_ , do_sample=A_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , A_ )
lowerCAmelCase__ : str = input_ids.shape[1]
lowerCAmelCase__ : Union[str, Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=A_ )
lowerCAmelCase__ : Dict = [text.strip() for text in output_text]
print('''HF generation:''' , A_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A_ )
hf_model.save_pretrained(A_ )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
__UpperCamelCase : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the BLIP-2 checkpoint to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
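# Example invocation (a sketch; the script filename is an assumption):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b
#
# The converted folder can then be loaded with the real (unmangled) transformers
# classes, e.g.:
#
#   from transformers import Blip2Processor, Blip2ForConditionalGeneration
#   processor = Blip2Processor.from_pretrained("./blip2-opt-2.7b")
#   model = Blip2ForConditionalGeneration.from_pretrained("./blip2-opt-2.7b")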
| 450 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__UpperCamelCase : Any = None
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase : str = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase : Any = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
__UpperCamelCase : Union[str, Any] = '''▁'''
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = AlbertTokenizer
def __init__( self : Union[str, Any] ,lowercase_ : List[str]=None ,lowercase_ : Union[str, Any]=None ,lowercase_ : str=True ,lowercase_ : Optional[Any]=True ,lowercase_ : str=False ,lowercase_ : Tuple="[CLS]" ,lowercase_ : Optional[int]="[SEP]" ,lowercase_ : Optional[Any]="<unk>" ,lowercase_ : List[Any]="[SEP]" ,lowercase_ : Optional[int]="<pad>" ,lowercase_ : List[Any]="[CLS]" ,lowercase_ : Optional[int]="[MASK]" ,**lowercase_ : Optional[int] ,):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text, so there should be a match in a non-normalized sentence.
lowerCAmelCase__ : Optional[Any] = (
AddedToken(lowercase_ ,lstrip=lowercase_ ,rstrip=lowercase_ ,normalized=lowercase_ )
if isinstance(lowercase_ ,lowercase_ )
else mask_token
)
super().__init__(
lowercase_ ,tokenizer_file=lowercase_ ,do_lower_case=lowercase_ ,remove_space=lowercase_ ,keep_accents=lowercase_ ,bos_token=lowercase_ ,eos_token=lowercase_ ,unk_token=lowercase_ ,sep_token=lowercase_ ,pad_token=lowercase_ ,cls_token=lowercase_ ,mask_token=lowercase_ ,**lowercase_ ,)
lowerCAmelCase__ : List[Any] = do_lower_case
lowerCAmelCase__ : str = remove_space
lowerCAmelCase__ : Optional[int] = keep_accents
lowerCAmelCase__ : Dict = vocab_file
        lowerCAmelCase__ : Optional[int] = bool(self.vocab_file )
def __lowerCAmelCase ( self : Dict ,lowercase_ : List[int] ,lowercase_ : Optional[List[int]] = None ):
lowerCAmelCase__ : List[Any] = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : List[int] ,lowercase_ : Optional[List[int]] = None ):
lowerCAmelCase__ : List[str] = [self.sep_token_id]
lowerCAmelCase__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : str ,lowercase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowercase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ : Union[str, Any] = os.path.join(
lowercase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file ,lowercase_ )
return (out_vocab_file,)
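# Usage sketch for the two helpers above, using the real AlbertTokenizerFast
# class name (the checkpoint id matches the maps defined earlier in this file):
#
#   from transformers import AlbertTokenizerFast
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   enc = tok("How are you?", "I am fine.")
#   # build_inputs_with_special_tokens     -> [CLS] A [SEP] B [SEP]
#   # create_token_type_ids_from_sequences -> 0s over [CLS] A [SEP], 1s over B [SEP]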
| 450 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def UpperCamelCase_ ( lowerCAmelCase__ = 8 ):
"""simple docstring"""
_lowerCAmelCase : int = ascii_letters + digits + punctuation
return "".join(secrets.choice(lowerCAmelCase__ ) for _ in range(lowerCAmelCase__ ) )
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
i -= len(lowerCAmelCase__ )
_lowerCAmelCase : Optional[Any] = i // 3
_lowerCAmelCase : Tuple = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowerCAmelCase : Optional[Any] = (
chars_incl
+ random(lowerCAmelCase__ , quotient + remainder )
+ random(lowerCAmelCase__ , lowerCAmelCase__ )
+ random(lowerCAmelCase__ , lowerCAmelCase__ )
)
_lowerCAmelCase : Tuple = list(lowerCAmelCase__ )
shuffle(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
# random is a generalised function for letters, characters and numbers
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
return "".join(secrets.choice(lowerCAmelCase__ ) for _ in range(lowerCAmelCase__ ) )
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
pass # Put your code here...
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
pass # Put your code here...
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
pass # Put your code here...
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ = 8 ):
"""simple docstring"""
if len(lowerCAmelCase__ ) < min_length:
        # The password must be at least min_length characters long
return False
_lowerCAmelCase : List[Any] = any(char in ascii_uppercase for char in password )
_lowerCAmelCase : Optional[int] = any(char in ascii_lowercase for char in password )
_lowerCAmelCase : Tuple = any(char in digits for char in password )
_lowerCAmelCase : Any = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain uppercase and lowercase letters,
# numbers, and special characters
def UpperCamelCase_ ( ):
"""simple docstring"""
_lowerCAmelCase : Tuple = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCAmelCase : Optional[Any] = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(lowerCAmelCase__ ) )
print(
"Alternative Password generated:" , alternative_password_generator(lowerCAmelCase__ , lowerCAmelCase__ ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
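# Usage sketch. The obfuscated function names above all collide, so this uses
# the presumed upstream names (password_generator, alternative_password_generator,
# is_strong_password) as assumptions:
#
#   password_generator(12)                    # 12 random chars drawn from all classes
#   alternative_password_generator("@#", 12)  # 12 chars guaranteed to include "@#"
#   is_strong_password("Aa1!aaaa")            # True: length >= 8 and all 4 classes present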
| 587 | import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
snake_case = False
snake_case = logging.get_logger(__name__)
snake_case = "ybelkada/fonts"
def UpperCamelCase_ ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
"Pix2StructImageProcessor. Please upgrade torch." )
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
requires_backends(lowerCAmelCase__ , ["torch"] )
_check_torch_version()
_lowerCAmelCase : int = image_tensor.unsqueeze(0 )
_lowerCAmelCase : List[str] = torch.nn.functional.unfold(lowerCAmelCase__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
_lowerCAmelCase : Any = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , lowerCAmelCase__ , lowerCAmelCase__ , -1 )
_lowerCAmelCase : int = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ = 36 , lowerCAmelCase__ = "black" , lowerCAmelCase__ = "white" , lowerCAmelCase__ = 5 , lowerCAmelCase__ = 5 , lowerCAmelCase__ = 5 , lowerCAmelCase__ = 5 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ):
"""simple docstring"""
requires_backends(lowerCAmelCase__ , "vision" )
# Add new lines so that each line is no more than 80 characters.
_lowerCAmelCase : List[Any] = textwrap.TextWrapper(width=80 )
_lowerCAmelCase : Optional[Any] = wrapper.wrap(text=lowerCAmelCase__ )
_lowerCAmelCase : int = "\n".join(lowerCAmelCase__ )
if font_bytes is not None and font_path is None:
_lowerCAmelCase : Optional[int] = io.BytesIO(lowerCAmelCase__ )
elif font_path is not None:
_lowerCAmelCase : Dict = font_path
else:
_lowerCAmelCase : int = hf_hub_download(lowerCAmelCase__ , "Arial.TTF" )
_lowerCAmelCase : str = ImageFont.truetype(lowerCAmelCase__ , encoding="UTF-8" , size=lowerCAmelCase__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
_lowerCAmelCase : Union[str, Any] = ImageDraw.Draw(Image.new("RGB" , (1, 1) , lowerCAmelCase__ ) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = temp_draw.textbbox((0, 0) , lowerCAmelCase__ , lowerCAmelCase__ )
# Create the actual image with a bit of padding around the text.
_lowerCAmelCase : List[str] = text_width + left_padding + right_padding
_lowerCAmelCase : Union[str, Any] = text_height + top_padding + bottom_padding
_lowerCAmelCase : Any = Image.new("RGB" , (image_width, image_height) , lowerCAmelCase__ )
_lowerCAmelCase : List[Any] = ImageDraw.Draw(lowerCAmelCase__ )
draw.text(xy=(left_padding, top_padding) , text=lowerCAmelCase__ , fill=lowerCAmelCase__ , font=lowerCAmelCase__ )
return image
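# Usage sketch for render_text (parameter names below are assumptions about the
# unmangled signature; requires Pillow):
#
#   img = render_text("What is the total amount?")
#   img.size  # width/height follow the wrapped text plus the 5px default padding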
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
requires_backends(lowerCAmelCase__ , "vision" )
# Convert to PIL image if necessary
_lowerCAmelCase : Union[str, Any] = to_pil_image(lowerCAmelCase__ )
_lowerCAmelCase : Union[str, Any] = render_text(lowerCAmelCase__ , **lowerCAmelCase__ )
_lowerCAmelCase : str = max(header_image.width , image.width )
_lowerCAmelCase : Optional[Any] = int(image.height * (new_width / image.width) )
_lowerCAmelCase : Dict = int(header_image.height * (new_width / header_image.width) )
_lowerCAmelCase : int = Image.new("RGB" , (new_width, new_height + new_header_height) , "white" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
_lowerCAmelCase : Union[str, Any] = to_numpy_array(lowerCAmelCase__ )
if infer_channel_dimension_format(lowerCAmelCase__ ) == ChannelDimension.LAST:
_lowerCAmelCase : Union[str, Any] = to_channel_dimension_format(lowerCAmelCase__ , ChannelDimension.LAST )
return new_image
class __A ( snake_case__ ):
'''simple docstring'''
a_ = ['''flattened_patches''']
def __init__( self , _snake_case = True , _snake_case = True , _snake_case = None , _snake_case = 2048 , _snake_case = False , **_snake_case , ):
super().__init__(**_snake_case )
_lowerCAmelCase : List[Any] = patch_size if patch_size is not None else {"height": 16, "width": 16}
_lowerCAmelCase : Any = do_normalize
_lowerCAmelCase : Tuple = do_convert_rgb
_lowerCAmelCase : Tuple = max_patches
_lowerCAmelCase : Optional[int] = is_vqa
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , **_snake_case ):
requires_backends(self.extract_flattened_patches , "torch" )
_check_torch_version()
# convert to torch
_lowerCAmelCase : Dict = to_channel_dimension_format(_snake_case , ChannelDimension.FIRST )
_lowerCAmelCase : Dict = torch.from_numpy(_snake_case )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = patch_size["height"], patch_size["width"]
_lowerCAmelCase , _lowerCAmelCase : Dict = get_image_size(_snake_case )
        # maximize the resize scale such that the resized image fits within max_patches patches
_lowerCAmelCase : Any = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
_lowerCAmelCase : str = max(min(math.floor(scale * image_height / patch_height ) , _snake_case ) , 1 )
_lowerCAmelCase : Any = max(min(math.floor(scale * image_width / patch_width ) , _snake_case ) , 1 )
_lowerCAmelCase : Any = max(num_feasible_rows * patch_height , 1 )
_lowerCAmelCase : Optional[int] = max(num_feasible_cols * patch_width , 1 )
_lowerCAmelCase : Tuple = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=_snake_case , antialias=_snake_case , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
_lowerCAmelCase : str = torch_extract_patches(_snake_case , _snake_case , _snake_case )
_lowerCAmelCase : Optional[Any] = patches.shape
_lowerCAmelCase : List[Any] = patches_shape[1]
_lowerCAmelCase : str = patches_shape[2]
_lowerCAmelCase : int = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
_lowerCAmelCase : List[Any] = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
_lowerCAmelCase : Tuple = torch.arange(_snake_case ).reshape([rows, 1] ).repeat(1 , _snake_case ).reshape([rows * columns, 1] )
_lowerCAmelCase : List[str] = torch.arange(_snake_case ).reshape([1, columns] ).repeat(_snake_case , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
_lowerCAmelCase : Tuple = row_ids.to(torch.floataa )
_lowerCAmelCase : Optional[int] = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
_lowerCAmelCase : int = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
_lowerCAmelCase : List[Any] = torch.nn.functional.pad(_snake_case , [0, 0, 0, max_patches - (rows * columns)] ).float()
_lowerCAmelCase : List[Any] = to_numpy_array(_snake_case )
return result
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case = None , **_snake_case ):
if image.dtype == np.uinta:
_lowerCAmelCase : Any = image.astype(np.floataa )
# take mean across the whole `image`
_lowerCAmelCase : str = np.mean(_snake_case )
_lowerCAmelCase : List[Any] = np.std(_snake_case )
_lowerCAmelCase : int = max(_snake_case , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(_snake_case , mean=_snake_case , std=_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
_lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCAmelCase : Tuple = patch_size if patch_size is not None else self.patch_size
_lowerCAmelCase : Optional[int] = max_patches if max_patches is not None else self.max_patches
_lowerCAmelCase : List[str] = self.is_vqa
if kwargs.get("data_format" , _snake_case ) is not None:
raise ValueError("data_format is not an accepted input as the outputs are " )
_lowerCAmelCase : Union[str, Any] = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCAmelCase : Dict = [convert_to_rgb(_snake_case ) for image in images]
# All transformations expect numpy arrays.
_lowerCAmelCase : Tuple = [to_numpy_array(_snake_case ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("A header text must be provided for VQA models." )
_lowerCAmelCase : Tuple = kwargs.pop("font_bytes" , _snake_case )
_lowerCAmelCase : List[Any] = kwargs.pop("font_path" , _snake_case )
if isinstance(_snake_case , _snake_case ):
_lowerCAmelCase : str = [header_text] * len(_snake_case )
_lowerCAmelCase : Union[str, Any] = [
render_header(_snake_case , header_text[i] , font_bytes=_snake_case , font_path=_snake_case )
for i, image in enumerate(_snake_case )
]
if do_normalize:
_lowerCAmelCase : Tuple = [self.normalize(image=_snake_case ) for image in images]
# convert to torch tensor and permute
_lowerCAmelCase : Dict = [
self.extract_flattened_patches(image=_snake_case , max_patches=_snake_case , patch_size=_snake_case )
for image in images
]
# create attention mask in numpy
_lowerCAmelCase : Optional[Any] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
_lowerCAmelCase : int = BatchFeature(
data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=_snake_case )
return encoded_outputs
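# Standalone, de-mangled sketch of the unfold-based patch extraction used by
# extract_flattened_patches above (the variable names here are assumptions
# about the original, unobfuscated code):

import torch

def extract_patches_sketch(image: torch.Tensor, patch_h: int, patch_w: int) -> torch.Tensor:
    """Split a (C, H, W) image into non-overlapping (patch_h, patch_w) patches."""
    x = image.unsqueeze(0)  # (1, C, H, W)
    patches = torch.nn.functional.unfold(x, (patch_h, patch_w), stride=(patch_h, patch_w))
    # (1, C * patch_h * patch_w, rows * cols) -> (1, C, patch_h, patch_w, rows * cols)
    patches = patches.reshape(x.size(0), x.size(1), patch_h, patch_w, -1)
    # -> (rows, cols, patch_h * patch_w * C): one flattened patch per grid cell
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        x.size(2) // patch_h, x.size(3) // patch_w, x.size(1) * patch_h * patch_w
    )
    return patches.unsqueeze(0)  # (1, rows, cols, patch_h * patch_w * C)

# extract_patches_sketch(torch.randn(3, 32, 32), 16, 16).shape -> (1, 2, 2, 768)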
| 587 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
debug_launcher(test_script.main )
def _lowerCamelCase ( self ) -> Dict:
debug_launcher(test_ops.main )
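# debug_launcher is what lets these CPU-only tests exercise distributed code
# paths: it spawns the given function across multiple processes (two by
# default, per the accelerate docs). A direct-use sketch; my_training_function
# is a placeholder:
#
#   from accelerate import debug_launcher
#   debug_launcher(my_training_function, num_processes=2)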
| 76 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
a : str = 'pt'
elif is_tf_available():
a : Dict = 'tf'
else:
a : Optional[Any] = 'jax'
class lowercase(_lowercase , unittest.TestCase ):
__snake_case: List[Any] = PerceiverTokenizer
__snake_case: Optional[Any] = False
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
super().setUp()
a__ = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self ) -> Any:
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase__ ( self , **__SCREAMING_SNAKE_CASE ) -> PerceiverTokenizer:
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2_0 , __SCREAMING_SNAKE_CASE=5 ) -> Tuple[str, list]:
"""simple docstring"""
a__ = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
try:
a__ = tokenizer.decode([i] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
a__ = list(filter(lambda __SCREAMING_SNAKE_CASE : re.match(R'^[ a-zA-Z]+$' , t[1] ) , __SCREAMING_SNAKE_CASE ) )
a__ = list(filter(lambda __SCREAMING_SNAKE_CASE : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) )
if max_length is not None and len(__SCREAMING_SNAKE_CASE ) > max_length:
a__ = toks[:max_length]
if min_length is not None and len(__SCREAMING_SNAKE_CASE ) < min_length and len(__SCREAMING_SNAKE_CASE ) > 0:
while len(__SCREAMING_SNAKE_CASE ) < min_length:
a__ = toks + toks
# toks_str = [t[1] for t in toks]
a__ = [t[0] for t in toks]
# Ensure consistency
a__ = tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
if " " not in output_txt and len(__SCREAMING_SNAKE_CASE ) > 1:
a__ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
)
if with_prefix_space:
a__ = ' ' + output_txt
a__ = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
return output_txt, output_ids
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = 'Unicode €.'
a__ = tokenizer(__SCREAMING_SNAKE_CASE )
a__ = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'] , __SCREAMING_SNAKE_CASE )
# decoding
a__ = tokenizer.decode(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , '[CLS]Unicode €.[SEP]' )
a__ = tokenizer('e è é ê ë' )
a__ = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'] , __SCREAMING_SNAKE_CASE )
# decoding
a__ = tokenizer.decode(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
a__ = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
a__ = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if FRAMEWORK != "jax":
a__ = list(batch.input_ids.numpy()[0] )
else:
a__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a__ = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , __SCREAMING_SNAKE_CASE )
self.assertIn('attention_mask' , __SCREAMING_SNAKE_CASE )
self.assertNotIn('decoder_input_ids' , __SCREAMING_SNAKE_CASE )
self.assertNotIn('decoder_attention_mask' , __SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = [
'Summary of the text.',
'Another summary.',
]
a__ = tokenizer(
text_target=__SCREAMING_SNAKE_CASE , max_length=3_2 , padding='max_length' , truncation=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
a__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
a__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ = tempfile.mkdtemp()
a__ = ' He is very happy, UNwant\u00E9d,running'
a__ = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a__ = tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE )
a__ = after_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
a__ = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ = tempfile.mkdtemp()
a__ = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
a__ = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
a__ = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a__ = tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE )
a__ = after_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
a__ = tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
a__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
a__ = json.load(__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
a__ = json.load(__SCREAMING_SNAKE_CASE )
a__ = [f'<extra_id_{i}>' for i in range(1_2_5 )]
a__ = added_tokens_extra_ids + [
'an_additional_special_token'
]
a__ = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a__ = tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a__ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=__SCREAMING_SNAKE_CASE )]
a__ = tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '�' )
def lowercase__ ( self ) -> str:
"""simple docstring"""
pass
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
pass
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ = self.get_tokenizers(fast=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
a__ = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
a__ = tokenizer.convert_tokens_to_string(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
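# The Perceiver tokenizer operates on raw UTF-8 bytes: judging from the
# expected ids above, each byte value is offset by 6 to make room for the
# special tokens (e.g. "U" -> ord("U") + 6 = 91, with 4 = [CLS] and 5 = [SEP]).
# A minimal round-trip sketch:
#
#   from transformers import PerceiverTokenizer
#   tok = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   tok.decode(tok("Unicode €.").input_ids)  # "[CLS]Unicode €.[SEP]"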
| 273 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_lowercase ):
__lowercase = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
__lowercase = FlaxAutoModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_lowercase ):
__lowercase = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
__lowercase = FlaxAutoModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
__lowercase = AutoTokenizer.from_pretrained(_lowercase )
__lowercase = FlaxBertModel.from_pretrained(_lowercase )
__lowercase = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowerCAmelCase__ ):
return model(**_lowercase )
eval(**_lowercase ).block_until_ready()
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
__lowercase = AutoTokenizer.from_pretrained(_lowercase )
__lowercase = FlaxRobertaModel.from_pretrained(_lowercase )
__lowercase = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowerCAmelCase__ ):
return model(**_lowercase )
eval(**_lowercase ).block_until_ready()
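    # The pattern above is the standard jit-compatibility check: jax.jit traces
    # the model call, and block_until_ready() forces the asynchronous dispatch
    # to finish so compilation errors surface inside the test rather than
    # later. Condensed, mirroring the eval helper above:
    #
    #   jax.jit(lambda **kw: model(**kw))(**inputs).block_until_ready()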
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , '''bert-base is not a local folder and is not a valid model identifier''' ):
__lowercase = FlaxAutoModel.from_pretrained('''bert-base''' )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__lowercase = FlaxAutoModel.from_pretrained(_lowercase , revision='''aaaaaa''' )
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
__lowercase = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(_lowercase , '''Use `from_pt=True` to load this model''' ):
__lowercase = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) | 707 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__a : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__a : Any = TaTokenizerFast
__a : Any = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Any = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Union[str, Any] = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Any = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__a : int = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
) | 522 | 0 |
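# _LazyModule defers the heavy backend imports declared above until attribute
# access, so `import transformers` stays cheap when a backend is missing. A
# sketch of the observable effect, using the real transformers names:
#
#   from transformers import MT5ForConditionalGeneration  # torch is imported only now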
from manim import *
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =Rectangle(height=0.5, width=0.5)
_lowercase =Rectangle(height=0.4_6, width=0.4_6).set_stroke(width=0)
_lowercase =[mem.copy() for i in range(6)]
_lowercase =[mem.copy() for i in range(6)]
_lowercase =VGroup(*snake_case).arrange(snake_case, buff=0)
_lowercase =VGroup(*snake_case).arrange(snake_case, buff=0)
_lowercase =VGroup(snake_case, snake_case).arrange(snake_case, buff=0)
_lowercase =Text('CPU', font_size=24)
_lowercase =Group(snake_case, snake_case).arrange(snake_case, buff=0.5, aligned_edge=snake_case)
cpu.move_to([-2.5, -0.5, 0])
self.add(snake_case)
_lowercase =[mem.copy() for i in range(1)]
_lowercase =VGroup(*snake_case).arrange(snake_case, buff=0)
_lowercase =Text('GPU', font_size=24)
_lowercase =Group(snake_case, snake_case).arrange(snake_case, buff=0.5, aligned_edge=snake_case)
gpu.align_to(snake_case, snake_case)
gpu.set_x(gpu.get_x() - 1)
self.add(snake_case)
_lowercase =[mem.copy() for i in range(6)]
_lowercase =VGroup(*snake_case).arrange(snake_case, buff=0)
_lowercase =Text('Model', font_size=24)
_lowercase =Group(snake_case, snake_case).arrange(snake_case, buff=0.5, aligned_edge=snake_case)
model.move_to([3, -1.0, 0])
self.play(
Create(snake_case, run_time=1), Create(snake_case, run_time=1), Create(snake_case, run_time=1), )
_lowercase =MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''', font_size=24, )
_lowercase =Square(side_length=2.2)
key.move_to([-5, 2, 0])
_lowercase =MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, )
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(snake_case, run_time=2.5), Write(snake_case), Write(snake_case))
self.add(snake_case)
_lowercase =[]
_lowercase =[]
_lowercase =[]
for i, rect in enumerate(snake_case):
_lowercase =Rectangle(height=0.4_6, width=0.4_6).set_stroke(width=0.0).set_fill(snake_case, opacity=0.7)
cpu_target.move_to(snake_case)
cpu_target.generate_target()
_lowercase =0.4_6 / 4
_lowercase =0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.0_2, direction=snake_case)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target, direction=snake_case, buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target, direction=snake_case, buff=0.0)
cpu_targs.append(snake_case)
first_animations.append(rect.animate(run_time=0.5).set_stroke(snake_case))
second_animations.append(MoveToTarget(snake_case, run_time=1.5))
self.play(*snake_case)
self.play(*snake_case)
self.wait()
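# Rendering sketch: manim scenes are rendered from the command line, e.g.
#
#   manim -pql this_file.py SCREAMING_SNAKE_CASE_
#
# where -p previews the result and -ql renders at low quality (the class name
# is the obfuscated one used above; the filename is an assumption).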
| 181 |
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE_ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self :Dict, snake_case :str="sayef/fsner-bert-base-uncased"):
"""simple docstring"""
super(snake_case, self).__init__()
_lowercase =AutoModel.from_pretrained(snake_case, return_dict=snake_case)
_lowercase =torch.nn.CosineSimilarity(3, 1e-0_8)
_lowercase =torch.nn.Softmax(dim=1)
def UpperCamelCase__ ( self :str, **snake_case :int):
"""simple docstring"""
return self.bert(**snake_case).last_hidden_state
def UpperCamelCase__ ( self :Union[str, Any], snake_case :Optional[Any]):
"""simple docstring"""
return token_embeddings.sum(2, keepdim=snake_case)
def UpperCamelCase__ ( self :List[Any], snake_case :int, snake_case :Dict, snake_case :Dict=1):
"""simple docstring"""
return self.softmax(T * self.cos(snake_case, snake_case))
def UpperCamelCase__ ( self :List[str], snake_case :int, snake_case :List[str]):
"""simple docstring"""
_lowercase =W_supports['sizes'].tolist()
_lowercase =W_supports['start_token_id'].item()
_lowercase =W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_lowercase =self.BERT(**snake_case)
_lowercase =self.BERT(**snake_case)
_lowercase =None
_lowercase =None
_lowercase =W_supports['input_ids'] == start_token_id
_lowercase =W_supports['input_ids'] == end_token_id
for i, size in enumerate(snake_case):
if i == 0:
_lowercase =0
else:
_lowercase =support_sizes[i - 1]
_lowercase =S[s : s + size][start_token_masks[s : s + size]]
_lowercase =S[s : s + size][end_token_masks[s : s + size]]
_lowercase =torch.matmul(q[i], s_start.T).sum(1).softmax(0)
_lowercase =torch.matmul(q[i], s_end.T).sum(1).softmax(0)
if p_starts is not None:
_lowercase =torch.vstack((p_starts, p_start))
_lowercase =torch.vstack((p_ends, p_end))
else:
_lowercase =p_start
_lowercase =p_end
return p_starts, p_ends
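# De-mangled sketch of the scoring rule in the forward pass above: each query
# token embedding is dotted with every [start]-token embedding from the support
# set, the support axis is summed out, and a softmax over query positions gives
# the start distribution (ends are handled the same way). The toy shapes below
# are assumptions:

import torch

q_tokens = torch.randn(10, 768)  # one query sentence: 10 token embeddings
s_start = torch.randn(3, 768)    # [start]-token embeddings from 3 support sentences
p_start = torch.matmul(q_tokens, s_start.T).sum(1).softmax(0)  # (10,) over query positions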
| 181 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowercase = 50003
lowercase = 50002
@require_sentencepiece
@require_tokenizers
class A_ ( snake_case_ , unittest.TestCase ):
UpperCAmelCase__ = PLBartTokenizer
UpperCAmelCase__ = None
UpperCAmelCase__ = False
def _snake_case ( self : Union[str, Any] ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
__magic_name__ = PLBartTokenizer(__lowerCamelCase , language_codes="base" , keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : int ) -> Dict:
__magic_name__ = PLBartTokenizer(__lowerCamelCase , language_codes="base" , keep_accents=__lowerCamelCase )
__magic_name__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__magic_name__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__magic_name__ = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__magic_name__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
__magic_name__ = tokenizer.vocab_size
__magic_name__ = [tokenizer.convert_ids_to_tokens(__lowerCamelCase ) for x in range(end - 4 , __lowerCamelCase )]
self.assertListEqual(__lowerCamelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] )
__magic_name__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
__magic_name__ = tokenizer(__lowerCamelCase ).input_ids
self.assertEqual(
tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) , __lowerCamelCase , )
def _snake_case ( self : Union[str, Any] ) -> str:
__magic_name__ = PLBartTokenizer(__lowerCamelCase , language_codes="multi" , keep_accents=__lowerCamelCase )
__magic_name__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__magic_name__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__magic_name__ = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__magic_name__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
__magic_name__ = tokenizer.vocab_size
__magic_name__ = [tokenizer.convert_ids_to_tokens(__lowerCamelCase ) for x in range(end - 7 , __lowerCamelCase )]
self.assertListEqual(
__lowerCamelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
__magic_name__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
__magic_name__ = tokenizer(__lowerCamelCase ).input_ids
self.assertEqual(
tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) , __lowerCamelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
UpperCAmelCase__ = '''uclanlp/plbart-python-en_XX'''
UpperCAmelCase__ = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
UpperCAmelCase__ = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
UpperCAmelCase__ = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def _snake_case ( cls : List[str] ) -> List[str]:
__magic_name__ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
__magic_name__ = 1
return cls
def _snake_case ( self : Union[str, Any] ) -> Tuple:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def _snake_case ( self : List[Any] ) -> Tuple:
__magic_name__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
def _snake_case ( self : Tuple ) -> Dict:
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids )
__magic_name__ = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
__magic_name__ = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
__magic_name__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase )
def _snake_case ( self : List[str] ) -> Union[str, Any]:
__magic_name__ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , __lowerCamelCase )
__magic_name__ = 1_0
__magic_name__ = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
def _snake_case ( self : Any ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def _snake_case ( self : Optional[int] ) -> Any:
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase )
__magic_name__ = PLBartTokenizer.from_pretrained(__lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase )
@require_torch
def _snake_case ( self : Union[str, Any] ) -> Dict:
__magic_name__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors="pt" )
__magic_name__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __lowerCamelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _snake_case ( self : str ) -> Union[str, Any]:
__magic_name__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
__magic_name__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
__magic_name__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _snake_case ( self : List[Any] ) -> List[str]:
__magic_name__ = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors="pt" )
__magic_name__ = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0 , return_tensors="pt" )
__magic_name__ = targets["input_ids"]
__magic_name__ = shift_tokens_right(__lowerCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def _snake_case ( self : Tuple ) -> List[Any]:
__magic_name__ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
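# The last test documents PLBart's seq2seq convention: the source sequence is
# suffixed with EOS (2) plus the *source* language code, while the *target*
# language code is supplied as forced_bos_token_id for generation. A sketch
# with the real class name:
#
#   from transformers import PLBartTokenizer
#   tok = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX")
#   tok("def f(): pass").input_ids[-2:]  # [2, 50002] == [EOS, __python__]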
| 468 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
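# Standalone usage sketch of the scheduler protocol exercised above (illustrative: the random
# "model output" below stands in for the real UnCLIP prior/decoder network).
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
    sample = torch.randn(1, 3, 32, 32)
    for t in scheduler.timesteps:
        residual = torch.randn_like(sample)  # stand-in for a trained model's noise prediction
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)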
| 468 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
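# Example invocation (illustrative: the script name and model identifiers are assumptions,
# substitute your own checkpoints):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated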
| 42 |
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal line at the point of reflection on the ellipse
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
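# Sanity note on next_point: writing the normal's gradient as m = tan(theta), the code uses the
# double-angle identities sin(2*theta) = 2m / (1 + m^2) and cos(2*theta) = (1 - m^2) / (1 + m^2),
# and tan(2*theta - phi) = (s2 - c2*tan(phi)) / (c2 + s2*tan(phi)), i.e. it reflects the incoming
# direction about the normal (out_angle = 2*theta - in_angle). With the default start point
# (1.4, -9.6) this reports 354 reflections, the published answer to Project Euler problem 144.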
| 42 | 1 |
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """
    Calculate NAND of the two inputs: returns 1 unless both inputs are 1.
    """
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """
    Tests the nand_gate function.
    """
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1)) | 19 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self) -> UNet2DModel:
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self) -> VQModel:
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self) -> CLIPTextModel:
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self) -> None:
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
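# Standalone usage sketch of the pipeline under test (illustrative; mirrors the slow test below,
# step count chosen arbitrarily):
#
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = pipe(num_inference_steps=50, output_type="numpy").images[0]  # unconditional 256x256 sample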
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self) -> None:
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance | 19 | 1 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
} | 6 |
'''simple docstring'''
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition `a` around the pivot a[left_index]; return the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
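# Usage note (illustrative): quick_sort_random sorts in place over the half-open range
# [left, right), so callers pass 0 and len(a):
#
#     >>> data = [5, 1, 4, 2, 3]
#     >>> quick_sort_random(data, 0, len(data))
#     >>> data
#     [1, 2, 3, 4, 5]
#
# The random pivot gives O(n log n) expected time and avoids the O(n^2) worst case that a fixed
# left pivot hits on already-sorted input.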
| 421 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
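# Standalone usage sketch (illustrative; "cvssp/audioldm" is the checkpoint the slow tests load):
#
#     pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
#     audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
#     # `audio` is a 1-D numpy waveform at the vocoder's sampling rate (16 kHz for this checkpoint)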
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
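# Why the indirection above (a sketch of the idea, not the real `_LazyModule` implementation):
# the heavy torch/tokenizer imports are deferred until an attribute is first accessed.
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._mapping = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#         def __getattr__(self, attr):
#             submodule = importlib.import_module("." + self._mapping[attr], self.__name__)
#             return getattr(submodule, attr)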
| 63 | 0 |
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert between metric length units, given their names (singular or plural) or symbols."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
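# Example conversions (illustrative):
#
#     >>> length_conversion(4, "kilometer", "megametre")
#     0.004
#     >>> length_conversion(1, "meter", "km")
#     0.001
#
# i.e. the result is value * 10 ** (from_exponent - to_exponent).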
if __name__ == "__main__":
from doctest import testmod
testmod() | 86 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__a :Any = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
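    # Flow of retrieve() below in the distributed case (a sketch): every rank gathers its query
    # batch to rank 0, rank 0 runs a single index search over the concatenated queries, and the
    # per-rank slices of doc ids and doc embeddings are scattered back:
    #
    #   rank k --gather(queries)--> rank 0 --_main_retrieve--> rank 0 --scatter(ids, embeds)--> rank k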
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids) | 86 | 1 |
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """Return the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify `point` against the training data using the KNN algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
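# Optional evaluation on the held-out split (illustrative):
#
#     predictions = [classifier(X_train, y_train, classes, point) for point in X_test]
#     accuracy = np.mean([pred == classes[true] for pred, true in zip(predictions, y_test)])
#     print(f"held-out accuracy: {accuracy:.2f}")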
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 509 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
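# Note on the replicate/shard pattern above: `replicate(params)` copies the parameter pytree to
# every device (adding a leading device axis), while `shard(prompt_ids)` splits the batch across
# devices, reshaping (num_devices * batch, ...) to (num_devices, batch, ...). With `jit=True` the
# pipeline then runs under `pmap`, executing one shard per device in parallel.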
| 509 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 115 |
"""simple docstring"""
def pancake_sort(arr):
    """Sort an array of numbers using pancake sort."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 115 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
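# Minimal usage sketch (illustrative values):
#
#     config = RoCBertConfig()                                     # defaults shown above
#     small = RoCBertConfig(hidden_size=256, num_hidden_layers=4)
#     small.save_pretrained("./roc-bert-small")                    # writes config.json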
| 279 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_A = get_logger(__name__)
_A = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class _lowerCAmelCase :
@add_start_docstrings(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _lowerCAmelCase :
@add_start_docstrings(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _lowerCAmelCase ( __a ):
@add_start_docstrings(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> jnp.ndarray:
for processor in self:
lowerCAmelCase_ = inspect.signature(processor.__call__ ).parameters
if len(_UpperCamelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"""Make sure that all the required parameters: {list(function_args.keys() )} for """
f"""{processor.__class__} are passed to the logits processor.""" )
lowerCAmelCase_ = processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
else:
lowerCAmelCase_ = processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase ) -> Tuple:
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not (temperature > 0):
raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""" )
lowerCAmelCase_ = temperature
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ = scores / self.temperature
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase = -float("Inf" ) , _UpperCamelCase = 1 ) -> Union[str, Any]:
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or (min_tokens_to_keep < 1):
raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
lowerCAmelCase_ = top_p
lowerCAmelCase_ = filter_value
lowerCAmelCase_ = min_tokens_to_keep
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ , lowerCAmelCase_ = lax.top_k(_UpperCamelCase , scores.shape[-1] )
lowerCAmelCase_ = jnp.full_like(_UpperCamelCase , self.filter_value )
lowerCAmelCase_ = jax.nn.softmax(_UpperCamelCase , axis=-1 ).cumsum(axis=-1 )
lowerCAmelCase_ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
lowerCAmelCase_ = jnp.roll(_UpperCamelCase , 1 )
score_mask |= score_mask.at[:, 0].set(_UpperCamelCase )
# min tokens to keep
lowerCAmelCase_ = score_mask.at[:, : self.min_tokens_to_keep].set(_UpperCamelCase )
lowerCAmelCase_ = jnp.where(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jax.lax.sort_key_val(_UpperCamelCase , _UpperCamelCase )[-1]
return next_scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase = -float("Inf" ) , _UpperCamelCase = 1 ) -> List[Any]:
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or top_k <= 0:
raise ValueError(f"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
lowerCAmelCase_ = max(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = filter_value
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ , lowerCAmelCase_ = scores.shape
lowerCAmelCase_ = jnp.full(batch_size * vocab_size , self.filter_value )
lowerCAmelCase_ = min(self.top_k , scores.shape[-1] ) # Safety check
lowerCAmelCase_ , lowerCAmelCase_ = lax.top_k(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jnp.broadcast_to((jnp.arange(_UpperCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
lowerCAmelCase_ = topk_scores.flatten()
lowerCAmelCase_ = topk_indices.flatten() + shift
lowerCAmelCase_ = next_scores_flat.at[topk_indices_flat].set(_UpperCamelCase )
lowerCAmelCase_ = next_scores_flat.reshape(_UpperCamelCase , _UpperCamelCase )
return next_scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase ) -> Any:
lowerCAmelCase_ = bos_token_id
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ = jnp.full(scores.shape , -float("inf" ) )
lowerCAmelCase_ = 1 - jnp.bool_(cur_len - 1 )
lowerCAmelCase_ = jnp.where(_UpperCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , _UpperCamelCase )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
lowerCAmelCase_ = max_length
lowerCAmelCase_ = eos_token_id
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ = jnp.full(scores.shape , -float("inf" ) )
lowerCAmelCase_ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowerCAmelCase_ = jnp.where(_UpperCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , _UpperCamelCase )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or min_length < 0:
raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or eos_token_id < 0:
raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
lowerCAmelCase_ = min_length
lowerCAmelCase_ = eos_token_id
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
lowerCAmelCase_ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
lowerCAmelCase_ = jnp.where(_UpperCamelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _UpperCamelCase )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> int:
lowerCAmelCase_ = list(_UpperCamelCase )
lowerCAmelCase_ = begin_index
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
lowerCAmelCase_ = 1 - jnp.bool_(cur_len - self.begin_index )
lowerCAmelCase_ = jnp.where(_UpperCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _UpperCamelCase )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase_ = list(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase ) -> List[Any]:
lowerCAmelCase_ = dict(_UpperCamelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
lowerCAmelCase_ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
lowerCAmelCase_ = force_token_array.at[index].set(_UpperCamelCase )
lowerCAmelCase_ = jnp.intaa(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
def _force_token(_UpperCamelCase ):
lowerCAmelCase_ = scores.shape[0]
lowerCAmelCase_ = self.force_token_array[generation_idx]
lowerCAmelCase_ = jnp.ones_like(_UpperCamelCase , dtype=scores.dtype ) * -float("inf" )
lowerCAmelCase_ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
lowerCAmelCase_ = lax.dynamic_update_slice(_UpperCamelCase , _UpperCamelCase , (0, current_token) )
return new_scores
lowerCAmelCase_ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_UpperCamelCase ) , lambda: scores , ) , )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
lowerCAmelCase_ = generate_config.eos_token_id
lowerCAmelCase_ = generate_config.no_timestamps_token_id
lowerCAmelCase_ = generate_config.no_timestamps_token_id + 1
lowerCAmelCase_ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_UpperCamelCase , "max_initial_timestamp_index" ):
lowerCAmelCase_ = generate_config.max_initial_timestamp_index
else:
lowerCAmelCase_ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
lowerCAmelCase_ = model_config.vocab_size
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
# suppress <|notimestamps|> which is handled by without_timestamps
lowerCAmelCase_ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(_UpperCamelCase , _UpperCamelCase ):
lowerCAmelCase_ = jnp.where((cur_len - self.begin_index) >= 1 , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _UpperCamelCase , )
lowerCAmelCase_ = jnp.where((cur_len - self.begin_index) < 2 , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _UpperCamelCase , _UpperCamelCase , )
return jnp.where(
_UpperCamelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _UpperCamelCase , )
lowerCAmelCase_ = jax.vmap(_UpperCamelCase )(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jnp.where(cur_len == self.begin_index , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _UpperCamelCase , )
lowerCAmelCase_ = self.timestamp_begin + self.max_initial_timestamp_index
lowerCAmelCase_ = jnp.where(
_UpperCamelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _UpperCamelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
lowerCAmelCase_ = jax.nn.log_softmax(_UpperCamelCase , axis=-1 )
def handle_cumulative_probs(_UpperCamelCase , _UpperCamelCase ):
lowerCAmelCase_ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
lowerCAmelCase_ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _UpperCamelCase , )
lowerCAmelCase_ = jax.vmap(_UpperCamelCase )(_UpperCamelCase , _UpperCamelCase )
return scores
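# Composition sketch (illustrative; these are the upstream transformers names for processors of
# the kind defined above -- the import path is an assumption and may differ across versions):
#
#     import jax.numpy as jnp
#     from transformers.generation.flax_logits_process import (
#         FlaxLogitsProcessorList,
#         FlaxTemperatureLogitsWarper,
#         FlaxTopKLogitsWarper,
#     )
#
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#     )
#     input_ids = jnp.zeros((1, 1), dtype=jnp.int32)
#     scores = processors(input_ids, jnp.zeros((1, 32000)), cur_len=1)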
| 279 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 451 | '''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two (this check also treats 0 as True)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
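
# Worked example (an illustrative sketch, not part of the original file): the
# bit trick holds because a positive power of two has exactly one set bit, so
# subtracting 1 flips that bit and every bit below it.
#   8  = 0b1000, 7  = 0b0111  ->  8 & 7  == 0        (power of two)
#   12 = 0b1100, 11 = 0b1011  ->  12 & 11 == 0b1000  (not a power of two)
if __name__ == "__main__":
    assert is_power_of_two(8)
    assert not is_power_of_two(12)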
| 451 | 1 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any]=13 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Dict=99 , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Union[str, Any]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : Any=37 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Any=5_12 , lowerCAmelCase_ : Optional[Any]=16 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Optional[int]="None" , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=None , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[str] =parent
A__ : List[Any] =batch_size
A__ : Any =seq_length
A__ : List[str] =is_training
A__ : str =use_input_mask
A__ : List[Any] =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : str =vocab_size
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : str =num_attention_heads
A__ : int =intermediate_size
A__ : int =hidden_act
A__ : List[Any] =hidden_dropout_prob
A__ : str =attention_probs_dropout_prob
A__ : Dict =max_position_embeddings
A__ : List[str] =type_vocab_size
A__ : List[str] =type_sequence_label_size
A__ : List[Any] =initializer_range
A__ : List[str] =num_labels
A__ : Any =num_choices
A__ : Union[str, Any] =relative_attention
A__ : int =position_biased_input
A__ : Any =pos_att_type
A__ : List[str] =scope
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
A__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Optional[Any] =None
if self.use_input_mask:
A__ : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A__ : str =None
if self.use_token_type_ids:
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Any =None
A__ : str =None
A__ : Any =None
if self.use_labels:
A__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : int =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Any =ids_tensor([self.batch_size] , self.num_choices )
A__ : Optional[int] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] ) -> List[Any]:
'''simple docstring'''
A__ : Dict =DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
A__ : Dict =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
A__ : List[Any] =model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> Any:
'''simple docstring'''
A__ : str =self.num_labels
A__ : Union[str, Any] =DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str:
'''simple docstring'''
A__ : Tuple =self.num_labels
A__ : Tuple =DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : List[Any] =DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
A__ : Any =DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Optional[Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def lowercase__ ( self : str ) -> Any:
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[Any] =DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
pass
@slow
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
A__ : Dict =DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
A__ : List[Any] =torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
A__ : Union[str, Any] =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ : Optional[int] =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
A__ : Optional[Any] =torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"{output[:, 1:4, 1:4]}" )
| 717 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__snake_case : Union[str, Any] = logging.getLogger(__name__)
__snake_case : int = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument("--pretrained_model_config", type=str, default="roberta-base",
                        help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument("--tokenizer", type=str, default="unigram-tokenizer-wikitext",
                        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument("--no_tpu", action="store_true",
                        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument("--tpu_name", type=str, default="local",
                        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.")
    parser.add_argument("--tpu_zone", type=str,
                        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument("--bfloat16", action="store_true",
                        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument("--train_dataset", type=str,
                        help="Path to training dataset to load. If the path begins with `gs://`"
                             " then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18,
                        help="Size of the shuffle buffer (in samples)")
    parser.add_argument("--eval_dataset", type=str,
                        help="Path to evaluation dataset to load. If the path begins with `gs://`"
                             " then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument("--max_length", type=int, default=512,
                        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument("--mlm_probability", type=float, default=0.15,
                        help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
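
# Example of the shard-name convention the regex above expects (the filename is
# an assumption for illustration, not taken from this repository): a shard named
# "mlm-00001-12288.tfrecord" encodes its sample count as the final number, so
# count_samples(["gs://bucket/mlm-00001-12288.tfrecord"]) returns 12288.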
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTOTUNE)
    dataset = dataset.prefetch(AUTOTUNE)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
__snake_case : str = parse_args()
main(args)
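
# Example invocation (illustrative only; the script name and bucket paths below
# are assumptions, not values defined in this file):
#
#   python run_mlm.py \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --tokenizer unigram-tokenizer-wikitext \
#       --output_dir gs://my-bucket/checkpoints/ \
#       --bfloat16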
| 687 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in a single file according to its pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the main docs by links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    # First let's get the default version: base version if we are in dev, bump of the minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
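
# Minimal sketch of how the "init" pattern rewrites a version string (the
# version numbers here are assumed examples, not the project's real versions):
#
#   pattern, template = REPLACE_PATTERNS["init"]
#   code = '__version__ = "0.19.0.dev0"\n'
#   pattern.sub(template.replace("VERSION", "0.19.0"), code)
#   # -> '__version__ = "0.19.0"\n'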
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work() | 125 |
import math
def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes; returns all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers below `limit` (numbers divisible by exactly
    one of lps(n) and ups(n), the primes bracketing sqrt(n))."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
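
# Quick sanity check of the sieve helper (an illustrative addition; the expected
# list is verifiable by hand). Note that solution() itself is expensive to run.
if __name__ == "__main__":
    assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]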
| 652 | 0 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: 1 / (1/R1 + 1/R2 + ...)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: R1 + R2 + ..."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
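
# Illustrative usage (a sketch with assumed values): two resistors of 2 ohm and
# 4 ohm. In series the values add; in parallel the reciprocals add.
if __name__ == "__main__":
    assert resistor_series([2, 4]) == 6
    assert abs(resistor_parallel([2, 4]) - 4 / 3) < 1e-9  # 1 / (1/2 + 1/4) = 4/3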
| 423 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
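
# Illustrative usage (a sketch, not part of the original module): with the
# default 224x224 images and 16x16 patches the encoder sees (224 / 16) ** 2 =
# 196 patches, and mask_ratio = 0.75 leaves 49 of them visible.
if __name__ == "__main__":
    config = ViTMAEConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    assert num_patches == 196
    assert int(num_patches * (1 - config.mask_ratio)) == 49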
| 423 | 1 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 24 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number via a simple iterative list."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
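

# Worked example (illustrative, verifiable by hand): fibonacci(12) evaluates to
# 144, the first Fibonacci number with three digits, so fibonacci_digits_index(3)
# returns 12.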
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 90 | 0 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple:
    """Solve a*x^2 + b*x + c = 0; complex roots are returned as complex numbers."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
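
# Illustrative examples (a sketch, not from the original file):
#   x^2 - 2x + 1 = 0 has the double real root 1.0, returned as a float;
#   x^2 + 1 = 0 has the complex pair +/-1j, which is why cmath.sqrt is used.
if __name__ == "__main__":
    assert quadratic_roots(1, -2, 1) == (1.0, 1.0)
    assert quadratic_roots(1, 0, 1) == (1j, -1j)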
| 106 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein distance via top-down dynamic programming with memoization."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
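
# Classic worked example (illustrative): "kitten" -> "sitting" takes three edits
# (substitute k->s, substitute e->i, append g).
if __name__ == "__main__":
    assert min_distance_up_bottom("kitten", "sitting") == 3
    assert min_distance_up_bottom("", "abc") == 3  # pure insertions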
| 106 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class a_ :
a : Any = MBartConfig
a : Any = {}
a : str = '''gelu'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=20 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , ):
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = bos_token_id
def UpperCamelCase_ ( self ):
_lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowercase = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowercase = prepare_mbart_inputs_dict(__snake_case , __snake_case , __snake_case )
return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
a : Dict = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
a : Optional[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
a : List[str] = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
a : Optional[int] = True
a : List[Any] = False
a : str = False
def UpperCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def UpperCamelCase_ ( self ):
_lowercase = TFMBartModelTester(self )
_lowercase = ConfigTester(self , config_class=__snake_case )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
_lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__snake_case )
@require_sentencepiece
@require_tokenizers
@require_tf
class a_ ( unittest.TestCase ):
a : List[Any] = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
a : int = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
a : List[str] = '''facebook/mbart-large-en-ro'''
@cached_property
def UpperCamelCase_ ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase_ ( self ):
_lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCamelCase_ ( self , **__UpperCamelCase ):
_lowercase = self.translate_src_text(**__snake_case )
self.assertListEqual(self.expected_text , __snake_case )
def UpperCamelCase_ ( self , **__UpperCamelCase ):
_lowercase = self.tokenizer(self.src_text , **__snake_case , return_tensors="""tf""" )
_lowercase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
_lowercase = self.tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case )
return generated_words
@slow
def UpperCamelCase_ ( self ):
self._assert_generated_batch_equal_expected() | 287 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
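
# Minimal usage sketch (illustrative; the generator below is an assumed example,
# and this mirrors what `datasets.Dataset.from_generator` does with this class):
#
#   def gen():
#       for i in range(4):
#           yield {"text": f"row {i}"}
#
#   ds = GeneratorDatasetInputStream(generator=gen).read()  # a 4-row Dataset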
| 207 | 0 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : Dict = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCamelCase : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
_lowerCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_lowerCAmelCase = CLIPTextModel(__magic_name__ )
_lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowerCamelCase ( self , __magic_name__ , __magic_name__=0 ):
"""simple docstring"""
if str(__magic_name__ ).startswith('mps' ):
_lowerCAmelCase = torch.manual_seed(__magic_name__ )
else:
_lowerCAmelCase = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
_lowerCAmelCase = 2
_lowerCAmelCase = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__magic_name__ , device=torch.device(__magic_name__ ) , )
_lowerCAmelCase = floats_tensor(control_image.shape , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase = Image.fromarray(np.uinta(__magic_name__ ) ).convert('RGB' ).resize((6_4, 6_4) )
_lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : str = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase : Optional[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)
_lowerCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(__magic_name__ )
torch.manual_seed(0 )
_lowerCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(__magic_name__ )
torch.manual_seed(0 )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_lowerCAmelCase = CLIPTextModel(__magic_name__ )
_lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase = MultiControlNetModel([controlneta, controlneta] )
_lowerCAmelCase = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowerCamelCase ( self , __magic_name__ , __magic_name__=0 ):
"""simple docstring"""
if str(__magic_name__ ).startswith('mps' ):
_lowerCAmelCase = torch.manual_seed(__magic_name__ )
else:
_lowerCAmelCase = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
_lowerCAmelCase = 2
_lowerCAmelCase = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__magic_name__ , device=torch.device(__magic_name__ ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__magic_name__ , device=torch.device(__magic_name__ ) , ),
]
_lowerCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase = Image.fromarray(np.uinta(__magic_name__ ) ).convert('RGB' ).resize((6_4, 6_4) )
_lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
def _lowerCamelCase ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__magic_name__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
_lowerCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=__magic_name__ , controlnet=__magic_name__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__magic_name__ )
_lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase = 'evil space-punk bird'
_lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_1_2, 5_1_2) )
_lowerCAmelCase = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_1_2, 5_1_2) )
_lowerCAmelCase = pipe(
__magic_name__ , __magic_name__ , control_image=__magic_name__ , generator=__magic_name__ , output_type='np' , num_inference_steps=5_0 , strength=0.6 , )
_lowerCAmelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
_lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9e-2
| 309 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
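
# Minimal sketch of the lazy-import idea used above (an assumed simplification,
# not the real `transformers._LazyModule` implementation): attribute access
# triggers the import of just the submodule that defines the requested symbol.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # imported lazily, only on first access to the symbol
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(module, symbol)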
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset ( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
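# The text builder yields a single "text" column with one row per input line, which
# is why every test below expects exactly 4 rows and 1 column from the text_path fixture.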
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_text_keep_in_memory ( keep_in_memory , text_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_text_dataset(dataset , expected_features )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def test_dataset_from_text_features ( features , text_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''text''': '''string'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader(text_path , features=features , cache_dir=cache_dir ).read()
    _check_text_dataset(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_text_split ( split , text_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    dataset = TextDatasetReader(text_path , cache_dir=cache_dir , split=split ).read()
    _check_text_dataset(dataset , expected_features )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_text_path_type ( path_type , text_path , tmp_path ):
    if issubclass(path_type , str ):
        path = text_path
    elif issubclass(path_type , list ):
        path = [text_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    dataset = TextDatasetReader(path , cache_dir=cache_dir ).read()
    _check_text_dataset(dataset , expected_features )
def _check_text_datasetdict ( dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_datasetdict_from_text_keep_in_memory ( keep_in_memory , text_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'''train''': text_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_text_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def test_datasetdict_from_text_features ( features , text_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {'''text''': '''string'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader({'''train''': text_path} , features=features , cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_text_split ( split , text_path , tmp_path ):
    if split:
        path = {split: text_path}
    else:
        split = '''train'''
        path = {'''train''': text_path, '''test''': text_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    dataset = TextDatasetReader(path , cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
'''simple docstring'''
from __future__ import annotations
def slowsort ( sequence : list , start : int | None = None , end : int | None = None ):
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]
    slowsort(sequence , start , end - 1 )
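# Example: slowsort sorts in place via "multiply and surrender" recursion.
# >>> data = [5, 2, 4, 1]
# >>> slowsort(data)
# >>> data
# [1, 2, 4, 5]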
if __name__ == "__main__":
from doctest import testmod
testmod()
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self , in_channels : int , out_channels : int , kernel_size : int = 3 , stride : int = 1 , groups : int = 1 , activation : Optional[str] = "relu" , ):
        super().__init__()
        self.convolution = nn.Convad(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , groups=groups , bias=False , )
        self.normalization = nn.BatchNormad(out_channels )
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()
    def forward( self , hidden_state : Tensor ):
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetEmbeddings( nn.Module ):
    '''simple docstring'''
    def __init__( self , config : RegNetConfig ):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        self.num_channels = config.num_channels
    def forward( self , pixel_values : Tensor ):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class RegNetShortCut( nn.Module ):
    '''simple docstring'''
    def __init__( self , in_channels : int , out_channels : int , stride : int = 2 ):
        super().__init__()
        self.convolution = nn.Convad(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNormad(out_channels )
    def forward( self , input : Tensor ) -> Tensor:
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class RegNetSELayer( nn.Module ):
    '''simple docstring'''
    def __init__( self , in_channels : int , reduced_channels : int ):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPoolad((1, 1) )
        self.attention = nn.Sequential(
            nn.Convad(in_channels , reduced_channels , kernel_size=1 ) , nn.ReLU() , nn.Convad(reduced_channels , in_channels , kernel_size=1 ) , nn.Sigmoid() , )
    def forward( self , hidden_state : Tensor ):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state )
        attention = self.attention(pooled )
        hidden_state = hidden_state * attention
        return hidden_state
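# Squeeze-and-Excitation: global-average-pool to 1x1, bottleneck through two 1x1
# convolutions, then rescale the input channels by the resulting attention weights.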
class RegNetXLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 1 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACTaFN[config.hidden_act]
    def forward( self , hidden_state : Tensor ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetYLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 1 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACTaFN[config.hidden_act]
    def forward( self , hidden_state : Tensor ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetStage( nn.Module ):
    '''simple docstring'''
    def __init__( self , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 2 , depth : int = 2 , ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config , in_channels , out_channels , stride=stride , ) , *[layer(config , out_channels , out_channels ) for _ in range(depth - 1 )] , )
    def forward( self , hidden_state : Tensor ):
        hidden_state = self.layers(hidden_state )
        return hidden_state
class RegNetEncoder( nn.Module ):
    '''simple docstring'''
    def __init__( self , config : RegNetConfig ):
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )
    def forward( self , hidden_state : Tensor , output_hidden_states : bool = False , return_dict : bool = True ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
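# The encoder records the feature map before every stage (plus the final one) when
# output_hidden_states is set; with return_dict=False it returns a plain tuple instead.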
class RegNetPreTrainedModel( PreTrainedModel ):
    '''simple docstring'''
    config_class = RegNetConfig
    base_model_prefix = '''regnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        if isinstance(module , nn.Convad ):
            nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
        elif isinstance(module , (nn.BatchNormad, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , RegNetEncoder ):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel( RegNetPreTrainedModel ):
'''simple docstring'''
    def __init__( self , config : RegNetConfig ):
        super().__init__(config )
        self.config = config
        self.embedder = RegNetEmbeddings(config )
        self.encoder = RegNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPoolad((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values : Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
    ''' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification( RegNetPreTrainedModel ):
'''simple docstring'''
    def __init__( self , config : RegNetConfig ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values : Optional[torch.FloatTensor] = None , labels : Optional[torch.LongTensor] = None , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
from pathlib import Path
import fire
def _lowerCAmelCase ( __magic_name__ :str , __magic_name__ :str , __magic_name__ :int ):
UpperCAmelCase_ = Path(__magic_name__ )
UpperCAmelCase_ = Path(__magic_name__ )
dest_dir.mkdir(exist_ok=__magic_name__ )
for path in src_dir.iterdir():
UpperCAmelCase_ = [x.rstrip() for x in list(path.open().readlines() )][:n]
UpperCAmelCase_ = dest_dir.joinpath(path.name )
print(__magic_name__ )
dest_path.open('''w''' ).write('''\n'''.join(__magic_name__ ) )
if __name__ == "__main__":
fire.Fire(minify)
def solution ( n : int = 10 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError('''Invalid input''' )
    modulus = 10**n
    number = 2_8433 * (pow(2 , 783_0457 , modulus )) + 1
    return str(number % modulus )
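# solution() returns the last `n` digits of 28433 * 2**7830457 + 1: pow() with a
# third argument does modular exponentiation, so the full number is never materialized.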
if __name__ == "__main__":
from doctest import testmod
testmod()
    print(F"{solution(1_0) = }")
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook( self , m , inputs , outputs ):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Convad ) or isinstance(m , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self , _snake_case ) -> Any:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_snake_case )
[x.remove() for x in self.handles]
return self
    @property
    def parametrized( self ):
        '''simple docstring'''
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: list = field(default_factory=list )
    dest_skip: list = field(default_factory=list )
    def __call__( self , x ):
        '''simple docstring'''
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                F"""Numbers of operations are different. Source module has {len(src_traced )} operations while"""
                F""" destination module has {len(dest_traced )}.""" )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F"""Transfered from={src_m} to={dest_m}""" )
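# ModuleTransfer is purely positional: both networks are traced on the same input and
# the i-th parametrized timm op is copied into the i-th HF op, so the two architectures
# must enumerate their leaf layers in the same order.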
def convert_weight_and_push( name , config , save_directory , push_to_hub = True ):
    print(F"""Converting {name}...""" )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F"""resnet{'-'.join(name.split('resnet' ) )}"""
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=True , )
        print(F"""Pushed {checkpoint_name}""" )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=idalabel , label2id=labelaid )
    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def lowerCAmelCase_ ( force : float , charge1 : float , charge2 : float , distance : float ):
    '''simple docstring'''
    charge_product = abs(charge1 * charge2 )
    if (force, charge1, charge2, distance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if distance < 0:
        raise ValueError('''Distance cannot be negative''' )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''' )
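# Example: solving for the force between two 1 C charges 1 m apart.
# >>> lowerCAmelCase_(force=0, charge1=1, charge2=1, distance=1)
# {'force': 8988000000.0}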
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex :
    def __init__( self , id_ ):
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__( self , other ):
        return self.key < other.key
    def __repr__( self ):
        return self.id
    def add_neighbor( self , vertex ):
        self.neighbors.append(vertex )
    def add_edge( self , vertex , weight ):
        self.edges[vertex.id] = weight
def connect ( graph , a , b , edge ):
    '''simple docstring'''
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prim ( graph : list , root : Vertex ):
    '''simple docstring'''
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap ( graph : list , root : Vertex ):
    '''simple docstring'''
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
        hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
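# prim() rescans the whole queue with min() on every step, giving O(V^2); prim_heap()
# keeps candidates in a binary heap, but since it re-heapifies after each round of
# relaxations it trades the linear scan for heap maintenance rather than a true
# decrease-key implementation.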
def test_vector ( ):
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size ( features : Features ) -> Optional[int]:
    '''simple docstring'''
    batch_size = np.inf
    def set_batch_size(feature : FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
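# Smaller Parquet row groups keep memory bounded when individual rows are large
# (images, audio, binary blobs); plain datasets fall back to the default batch size.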
class ParquetDatasetReader( AbstractDatasetReader ):
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : Optional[str] = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read( self ):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter :
    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , **parquet_writer_kwargs , ):
        """simple docstring"""
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self ) -> int:
        """simple docstring"""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , '''wb+''' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write( self , file_obj : BinaryIO , batch_size : int , **parquet_writer_kwargs ) -> int:
        """simple docstring"""
        written = 0
        _ = parquet_writer_kwargs.pop('''path_or_buf''' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
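# Minimal round trip (a sketch; assumes `ds` is an in-memory `Dataset`):
#   ParquetDatasetWriter(ds, "out.parquet").write()
#   ds2 = ParquetDatasetReader("out.parquet", cache_dir="cache").read()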
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            """help""": (
                """The model checkpoint for weights initialization. Leave None if you want to train a model from"""
                """ scratch."""
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None , metadata={"""help""": """The input training data file (a text file)."""} )
    train_data_files: Optional[str] = field(
        default=None , metadata={
            """help""": (
                """The input training data files (multiple files in glob format). """
                """Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
    eval_ref_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
    line_by_line: bool = field(
        default=False , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
    mlm: bool = field(
        default=False , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
    whole_word_mask: bool = field(default=False , metadata={"""help""": """Whether ot not to use whole word mask."""} )
    mlm_probability: float = field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
    plm_probability: float = field(
        default=1 / 6 , metadata={
            """help""": (
                """Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
                """ modeling."""
            )
        } , )
    max_span_length: int = field(
        default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
    block_size: int = field(
        default=-1 , metadata={
            """help""": (
                """Optional input sequence length after tokenization."""
                """The training dataset will be truncated in block of this size for training."""
                """Default to the model max input length for single sentence inputs (take into account special tokens)."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def get_dataset ( args , tokenizer , evaluate = False , cache_dir = None , ):
    '''simple docstring'''
    def _dataset(file_path , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
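# With --line_by_line each input line becomes one sample; otherwise TextDataset
# concatenates the corpus and slices it into contiguous block_size chunks.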
def main ( ):
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch." )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name" )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info("Training new model from scratch" )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
# Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
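    # XLNet is trained with permutation language modeling; BERT-style models get
    # (whole-word) masked-LM collation, and causal models plain LM collation (mlm=False).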
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"] )
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
        if trainer.is_world_master():
            with open(output_eval_file , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key in sorted(result.keys() ):
                    logger.info("  %s = %s" , key , str(result[key] ) )
                    writer.write("%s = %s\n" % (key, str(result[key] )) )
        results.update(result )
    return results
def _mp_fn ( index ):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''unispeech'''
    def __init__( self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.0_2 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=3_20 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_00 , codevector_dim=2_56 , proj_codevector_dim=2_56 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
@property
    def inputs_to_logits_ratio( self):
        return functools.reduce(operator.mul , self.conv_stride , 1)
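    # Every conv layer downsamples by its stride, so the input-samples-per-output-frame
    # ratio is the product of all strides (5 * 2**6 = 320 with the default conv_stride).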
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.0_5457_1817E-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8  # unit of c : m * s^-1
def lowerCAmelCase( force : float , area : float , distance : float ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
        distance = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
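# Example: Casimir force between two 1 m^2 plates spaced 1 micrometre apart is
# approximately 0.0013 N:
# >>> lowerCAmelCase(force=0, area=1, distance=1e-6)  # doctest: +ELLIPSIS
# {'force': 0.0013...}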
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class SCREAMING_SNAKE_CASE__ :
    def _get_dummy_components( self ):
"""simple docstring"""
torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _get_superresolution_dummy_components( self ):
"""simple docstring"""
torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
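    # Both component factories call torch.manual_seed(0) before every module so the
    # tiny test models are bit-for-bit deterministic across runs.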
    def _test_save_load_optional_components( self ):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds , negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , F"`{optional_component}` did not stay set to None after loading." , )
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1e-4 )
    def _test_save_load_local(self):
        """Pipeline outputs must match after a local save/load round trip."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 421 |
"""Project Euler problem 44: find pentagonal numbers P_i and P_j whose sum and
difference are both pentagonal, minimising the difference."""


def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is pentagonal, i.e. n = k(3k - 1) / 2 for a positive integer k."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
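# Sanity check: 92 = 8 * (3 * 8 - 1) / 2 is pentagonal, so is_pentagonal(92) is True,
# while 48 is not pentagonal and is_pentagonal(48) is False.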
def solution(limit: int = 5000) -> int:
    """Search pairs of the first ``limit`` pentagonal numbers and return the
    difference b = P_j - P_i for the first pair whose sum and difference are
    both pentagonal, or -1 if no such pair is found."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
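# With the default limit this search first succeeds at D = 5482660, the accepted
# answer to Project Euler problem 44.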
if __name__ == "__main__":
print(F'''{solution() = }''')
| 421 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
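# Sketch of the lazy-import effect: `from transformers.models.mmbt import MMBTModel`
# only executes modeling_mmbt (and hence imports torch) on first attribute access.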
| 683 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word.

        Returns (common substring, remaining node prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
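    # For example, a node with prefix "myprefix" matched against the word "mystring"
    # returns ("my", "prefix", "string").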
    def insert_many(self, words: list[str]) -> None:
        """Insert many words in the tree."""
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        """Return True if the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        """Delete a word from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True
    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line, indented by depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True
def pytests() -> None:
    assert test_trie()
def main() -> None:
    """Show how the radix tree prints its contents."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 683 | 1 |
"""Map an activation-function name to the corresponding torch module."""
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return an activation module for "swish"/"silu", "mish" or "gelu"."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
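# Usage sketch: get_activation("swish") and get_activation("silu") both return
# nn.SiLU(); any unknown name raises ValueError.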
| 78 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
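# For example, "bert.bert.encoder.layer.0.attention.self.query.weight" is renamed to
# "visual_bert.encoder.layer.0.attention.self.query.weight" by the prefix pairs above.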
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
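# Example invocation (paths are placeholders; the checkpoint name must be in
# ACCEPTABLE_CHECKPOINTS):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_pre_trained.th ./visualbert-vqa-pretrained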
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 47 | 0 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=new_model_command_factory)
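    # argparse stashes the factory on `args.func`; the `transformers-cli` entry point
    # then calls `args.func(args)` to build the command and invokes `.run()` on it.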
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w'):
pass
shutil.move(
F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , )
shutil.move(
F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""")
shutil.move(
F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""")
os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""")
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""")
shutil.move(
F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""")
os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""")
if output_flax:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""")
shutil.move(
F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""")
os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""")
shutil.move(
F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 61 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 61 | 1 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _lowercase ( nn.Module ):
def __init__( self , UpperCamelCase_ = 16 , UpperCamelCase_ = 88 , UpperCamelCase_ = None , UpperCamelCase_ = 1 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = 32 , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = "geglu" , UpperCamelCase_ = None , ):
super().__init__()
__magic_name__ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCamelCase_ , attention_head_dim=UpperCamelCase_ , in_channels=UpperCamelCase_ , num_layers=UpperCamelCase_ , dropout=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , cross_attention_dim=UpperCamelCase_ , attention_bias=UpperCamelCase_ , sample_size=UpperCamelCase_ , num_vector_embeds=UpperCamelCase_ , activation_fn=UpperCamelCase_ , num_embeds_ada_norm=UpperCamelCase_ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__magic_name__ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__magic_name__ = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__magic_name__ = [1, 0]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = True , ):
__magic_name__ = hidden_states
__magic_name__ = []
__magic_name__ = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__magic_name__ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__magic_name__ = self.transformer_index_for_condition[i]
__magic_name__ = self.transformers[transformer_index](
UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ , cross_attention_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
__magic_name__ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__magic_name__ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCamelCase_ )
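# In a pipeline (e.g. VersatileDiffusion's dual-guided mode) `mix_ratio`,
# `condition_lengths` and `transformer_index_for_condition` are typically set per
# call; the defaults weigh the 77 text tokens and 257 image tokens equally.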
| 490 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
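# These config tweaks turn off generation post-processing (n-gram blocking, forced
# BOS token, minimum length) that the scripted beam-search wrapper below does not
# implement, so that eager and exported outputs remain comparable.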
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )

        torch.onnx.export(
            bart_script_model, (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"], output_names=["output_ids"], dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            }, example_outputs=summary_ids, )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            }, )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
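# Example invocation (the script file name may differ in your checkout):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base --device cpu \
#       --output_file_path bart.onnx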
if __name__ == "__main__":
main()
| 490 | 1 |
import unittest

from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
            "input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 721 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
_SCREAMING_SNAKE_CASE = """zero2"""
_SCREAMING_SNAKE_CASE = """zero3"""
_SCREAMING_SNAKE_CASE = [ZEROa, ZEROa]
def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # run_asr does not save any results yet, so all we check is that the process didn't fail
        pass
    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 534 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
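# For instance, with base_model=False one produced pair is
# ("module.blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight").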
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
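# timm stores query, key and value as one (3 * hidden_size, hidden_size) projection;
# the slices above peel off rows [0:h), [h:2h) and [2h:3h) as q, k and v respectively.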
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
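# Editor's note: `_LazyModule` defers the heavy torch/tf submodule imports until an
# attribute is first accessed. Replacing this module in `sys.modules` installs the
# lazy proxy, which is also why the mapping above has to be named `_import_structure`.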
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485]),
            )
        )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
def one_pence():
    return 1


def two_pence(x):
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x):
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x):
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x):
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x):
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x):
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x):
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x=200):
    return two_pound(x)
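# Worked example (editor's note): making 5p from the coins {1p, 2p, 5p} has
# exactly four partitions -- 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1 -- and the
# cascade of recursive calls reproduces that count: solution(5) == 4.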
if __name__ == "__main__":
    print(solution(int(input().strip())))
"""simple docstring"""
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])
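# Worked example (editor's note): for the postfix input "5 6 9 * +" the stack
# evolves 5 -> 5,6 -> 5,6,9 -> 5,54 -> 59, so solve(["5", "6", "9", "*", "+"])
# returns 59.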
if __name__ == "__main__":
    Postfix = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix)) | 630 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
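# Editor's note: update() returns the triple (stepped, completed, reset):
#   stepped   -- the token made incremental progress on this constraint,
#   completed -- the constraint is now fully satisfied,
#   reset     -- the token broke progress and internal state was rewound.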
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
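# Editor's note: the trie maps e.g. [[1, 2, 3], [1, 2, 4]] to {1: {2: {3: {}, 4: {}}}},
# so next_tokens([1, 2]) returns [3, 4] and count_leaves(...) == 2 (one leaf per
# complete phrase); a phrase that is a prefix of another would collapse a leaf,
# which is what the subset check above rejects.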
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never touch self.constraints objects
        # throughout this process, so the copy starts at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
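# --- Illustrative usage (editor's sketch, not part of the original module).
# Token ids below are made up; this only exercises the public stepping API.
if __name__ == "__main__":
    demo = PhrasalConstraint([5, 9, 1])
    for token in [5, 9, 1]:
        stepped, completed, reset = demo.update(token)
        print(f"token={token} stepped={stepped} completed={completed} reset={reset}")
    # After consuming all three ids the constraint reports completion.
    assert demo.completed and demo.remaining() == 0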
def min_path_sum(grid: list) -> int:
    """
    Find the path from top left to bottom right of the grid with the lowest
    possible sum and return that sum.
    >>> min_path_sum([
    ...     [1, 3, 1],
    ...     [1, 5, 1],
    ...     [4, 2, 1],
    ... ])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    # Add, in place, the cheapest way of reaching each cell of current_row from row_above.
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_00_00
SMALL_TEST = 50_00

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
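# Editor's note (assumption): `get_duration`, imported from the local benchmark
# utils, appears to wrap each function so its wall-clock runtime is what gets
# recorded; the loop bodies intentionally just index the dataset and discard
# the rows.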
def benchmark_iterating():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_00}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10_00}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10_00}),
]
    functions_shuffled = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_00}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10_00}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10_00}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset')
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32')), 'numbers': datasets.Value('float32')}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={'list': (1_00,)},
        )
        print('first set of iterations')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print('shuffling dataset')
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling)')
        for func, kwargs in functions_shuffled:
            print('shuffled ', func.__name__, str(kwargs))
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
    benchmark_iterating()
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCamelCase__ : Tuple = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCamelCase__ : Union[str, Any] = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCamelCase__ : Any = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token'''), id='''sequence'''),
                    '''references''': datasets.Sequence(
                        datasets.Sequence(datasets.Value('''string''', id='''token'''), id='''sequence'''), id='''references'''
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
def solution():
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
        names = names.replace('"', '').split(',')
    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
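# Worked example (editor's note, from the Project Euler 22 statement): "COLIN"
# has a word value of 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in the
# sorted list it contributes a score of 938 * 53 = 49714.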
if __name__ == "__main__":
print(solution())
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/rembert''': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # the SentencePiece processor itself is not picklable
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str, sample: bool = False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token: str):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
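# Editor's note: for a pair of sequences the model inputs are laid out as
#     [CLS] A [SEP] B [SEP]
# with token_type_ids 0 covering "[CLS] A [SEP]" and 1 covering "B [SEP]",
# matching create_token_type_ids_from_sequences above.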
import argparse
from collections import defaultdict
import yaml
__lowerCamelCase = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(F'Building PyTorch model from configuration: {config}')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = """pytorch_model.bin"""
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'})
    infer_file: str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={'help': 'A csv or a json file containing the validation data.'}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={'help': 'The name of the task to train on.'},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={'help': 'The list of labels for the task.'}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'}
    )
    eval_metric: str = dataclasses.field(
        default='accuracy', metadata={'help': 'The evaluation metric used for the task.'}
    )
    evaluation_strategy: str = dataclasses.field(
        default='no',
        metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: int = dataclasses.field(
        default=10,
        metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'},
    )
    early_stopping_threshold: float = dataclasses.field(
        default=0.0,
        metadata={
            'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
        },
    )
    do_filter_by_confidence: bool = dataclasses.field(
        default=False,
        metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'},
    )
    do_filter_by_val_performance: bool = dataclasses.field(
        default=False,
        metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'},
    )
    finetune_on_labeled_data: bool = dataclasses.field(
        default=False,
        metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'},
    )
    confidence_threshold: float = dataclasses.field(
        default=0.0,
        metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'},
    )
    max_selftrain_iterations: int = dataclasses.field(
        default=100,
        metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={'help': 'Random seed for initialization.'},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ):
'''simple docstring'''
A_ : Optional[int] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
A_ : int = STModelArguments(model_name_or_path=__snake_case )
A_ : Any = STDataArguments(train_file=__snake_case ,infer_file=__snake_case )
A_ : Tuple = STTrainingArguments(output_dir=__snake_case )
A_ : List[str] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__snake_case ).items():
setattr(__snake_case ,__snake_case ,__snake_case )
for key, value in kwargs.items():
if hasattr(__snake_case ,__snake_case ):
setattr(__snake_case ,__snake_case ,__snake_case )
# Sanity checks
A_ : Optional[Any] = {}
A_ : Any = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
A_ : List[Any] = args.train_file
A_ : Tuple = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
A_ : Any = args.eval_file
for key in data_files:
A_ : List[str] = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
A_ : Optional[Any] = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
A_ : Dict = f"""{args.output_dir}/self-train_iter-{{}}""".format
A_ : Tuple = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir ,exist_ok=__snake_case )
os.makedirs(__snake_case ,exist_ok=__snake_case )
accelerator.wait_for_everyone()
A_ : Union[str, Any] = None
A_ : Tuple = None
A_ : Any = 0
A_ : Tuple = False
# Show the progress bar
A_ : str = tqdm(range(args.max_selftrain_iterations ) ,disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 ,int(args.max_selftrain_iterations ) ):
A_ : Optional[int] = data_dir_format(__snake_case )
assert os.path.exists(__snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
A_ : List[str] = os.path.join(__snake_case ,"""stage-1""" )
A_ : str = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__snake_case ,__snake_case ):
arguments_dict.update({key: value} )
A_ : List[str] = os.path.join(__snake_case ,"""best-checkpoint""" ,__snake_case )
if os.path.exists(__snake_case ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" ,__snake_case ,__snake_case ,)
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" ,__snake_case )
finetune(**__snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(__snake_case )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" ,__snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
A_ : List[Any] = os.path.join(__snake_case ,"""best-checkpoint""" )
A_ : str = os.path.join(__snake_case ,"""stage-2""" )
# Update arguments_dict
A_ : Union[str, Any] = model_path
A_ : Tuple = data_files["""train"""]
A_ : int = current_output_dir
A_ : Optional[Any] = os.path.join(__snake_case ,"""best-checkpoint""" ,__snake_case )
if os.path.exists(__snake_case ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" ,__snake_case ,__snake_case ,)
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" ,__snake_case )
finetune(**__snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(__snake_case )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" ,__snake_case )
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1 )
A_ : Optional[int] = AutoConfig.from_pretrained(os.path.join(__snake_case ,"""best-checkpoint""" ) )
A_ : Tuple = config.idalabel
A_ : int = os.path.join(__snake_case ,"""eval_results_best-checkpoint.json""" )
A_ : List[str] = os.path.join(__snake_case ,"""test_results_best-checkpoint.json""" )
assert os.path.exists(__snake_case )
with open(__snake_case ,"""r""" ) as f:
            eval_result = float(json.load(f )[args.eval_metric] )
A_ : List[str] = os.path.join(__snake_case ,"""infer_output_best-checkpoint.csv""" )
assert os.path.exists(__snake_case )
# Loading the dataset from local csv or json files.
A_ : List[Any] = load_dataset(args.data_file_extension ,data_files={"""data""": data_files["""infer"""]} )["""data"""]
A_ : List[Any] = load_dataset("""csv""" ,data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(__snake_case ,exist_ok=__snake_case )
shutil.copy(__snake_case ,os.path.join(__snake_case ,f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__snake_case ):
shutil.copy(__snake_case ,os.path.join(__snake_case ,f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
accelerator.wait_for_everyone()
A_ : Optional[int] = os.path.join(__snake_case ,f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" ,__snake_case )
logger.info("""Best evaluation result: %s = %f""" ,args.eval_metric ,__snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__snake_case ,f"""eval_results_iter-{iteration}.json""" ) ,os.path.join(__snake_case ,"""eval_results_best-iteration.json""" ) ,)
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" ,args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" ,args.eval_metric ,__snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__snake_case ,f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) ,os.path.join(__snake_case ,"""eval_results_best-iteration.json""" ) ,)
| 569 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
snake_case : List[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case : List[Any] = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
snake_case : Optional[int] = {
"""unc-nlp/lxmert-base-uncased""": 5_1_2,
}
snake_case : List[Any] = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
                         strip_accents=strip_accents, **kwargs)

        # Rebuild the backend normalizer when its serialized state disagrees with
        # the arguments passed here (same pattern as BertTokenizerFast).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] around a single sequence, or join a pair with [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type IDs: 0 for the first sequence and its specials, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 545 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class lowerCAmelCase__(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None,
                 resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True,
                 crop_size: Optional[Dict[str, int]] = None, do_rescale: bool = True,
                 rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
                 image_mean: Optional[Union[float, List[float]]] = None,
                 image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int],
               resample: PILImageResampling = PILImageResampling.BICUBIC,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize so that the shortest edge matches size["shortest_edge"], keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int],
                    data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float,
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None,
                   resample: PILImageResampling = None, do_center_crop: Optional[bool] = None,
                   crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None,
                   rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None,
                   image_mean: Optional[Union[float, List[float]]] = None,
                   image_std: Optional[Union[float, List[float]]] = None,
                   return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Apply resize -> center crop -> rescale -> normalize and pack the result into a BatchFeature."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
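
# Usage sketch (not part of the original file): run the default pipeline on a
# random RGB array; the output shape follows from the 256 shortest-edge resize
# and the 224x224 center crop configured above.
if __name__ == "__main__":
    processor = lowerCAmelCase__()
    dummy = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
    batch = processor.preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)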
| 712 |
"""Convert a T5X checkpoint into a Flax `transformers` checkpoint."""
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )
    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"]["embedding"] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"]["embedding"] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"]["embedding"] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
__lowerCamelCase : Optional[Any] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
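
# Example invocation (paths and model name below are placeholders):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path ./t5-v1_1-small-flax
#
# The dumped folder can then be loaded with
# FlaxAutoModelForSeq2SeqLM.from_pretrained("./t5-v1_1-small-flax").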
| 418 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def __UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def __UpperCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class(self.vocab_file )
__A =tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(_a , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(_a )
__A ='''こんにちは、世界。\nこんばんは、世界。'''
__A =tokenizer.tokenize(_a )
self.assertListEqual(_a , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__A =os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_a , '''wb''' ) as handle:
pickle.dump(_a , _a )
with open(_a , '''rb''' ) as handle:
__A =pickle.load(_a )
__A =tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __UpperCamelCase ( self ):
'''simple docstring'''
try:
__A =MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __UpperCamelCase ( self ):
'''simple docstring'''
try:
__A =MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =MecabTokenizer(do_lower_case=_a , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __UpperCamelCase ( self ):
'''simple docstring'''
try:
__A =MecabTokenizer(
do_lower_case=_a , normalize_text=_a , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =MecabTokenizer(normalize_text=_a , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(_a )
__A ='''こんにちは、世界。\nこんばんは、世界。'''
__A =tokenizer.tokenize(_a )
self.assertListEqual(_a , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__A =os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_a , '''wb''' ) as handle:
pickle.dump(_a , _a )
with open(_a , '''rb''' ) as handle:
__A =pickle.load(_a )
__A =tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
@require_sudachi
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =SudachiTokenizer(do_lower_case=_a , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =SudachiTokenizer(normalize_text=_a , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =SudachiTokenizer(trim_whitespace=_a , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(_a )
__A ='''こんにちは、世界。\nこんばんは、世界。'''
__A =tokenizer.tokenize(_a )
self.assertListEqual(_a , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__A =os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_a , '''wb''' ) as handle:
pickle.dump(_a , _a )
with open(_a , '''rb''' ) as handle:
__A =pickle.load(_a )
__A =tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
@require_jumanpp
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =JumanppTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =JumanppTokenizer(normalize_text=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =JumanppTokenizer(trim_whitespace=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
__A ={}
for i, token in enumerate(_a ):
__A =i
__A =WordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
__A =tokenizer.subword_tokenizer
__A =subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(_a , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
__A =subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(_a , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
__A =tokenizer.encode('''ありがとう。''' , add_special_tokens=_a )
__A =tokenizer.encode('''どういたしまして。''' , add_special_tokens=_a )
__A =tokenizer.build_inputs_with_special_tokens(_a )
__A =tokenizer.build_inputs_with_special_tokens(_a , _a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def __UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def __UpperCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
__A =tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
_a , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
__A ={}
for i, token in enumerate(_a ):
__A =i
__A =CharacterTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
__A =tokenizer.encode('''ありがとう。''' , add_special_tokens=_a )
__A =tokenizer.encode('''どういたしまして。''' , add_special_tokens=_a )
__A =tokenizer.build_inputs_with_special_tokens(_a )
__A =tokenizer.build_inputs_with_special_tokens(_a , _a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
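
# Illustrative note (not part of the original tests): BertJapaneseTokenizer
# composes a word-level tokenizer (MeCab, Sudachi, or Juman++) with a subword
# tokenizer (WordPiece or per-character), which is what the classes above
# exercise. A minimal sketch, assuming network access and a MeCab install:
#
#   tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
#   print(tokenizer.tokenize("こんにちは、世界。"))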
| 184 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
                 hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
                 hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"],
                 out_indices=[2, 3, 4], num_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes,
            depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels,
            out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
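
# Running these tests (commands are illustrative; RUN_SLOW gates the @slow
# integration test against the pretrained checkpoint):
#
#   python -m pytest tests/models/bit/test_modeling_bit.py
#   RUN_SLOW=1 python -m pytest tests/models/bit/test_modeling_bit.py -k "BitModelIntegrationTest"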
| 33 | 0 |
from __future__ import annotations

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    # Relax each outgoing edge of v; when we touch a node already settled from
    # the opposite search, try to tighten the best meeting distance.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        # Standard termination: once the settled costs of both frontiers exceed
        # the best meeting distance found so far, no shorter path can appear.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
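    # Quick check (not in the original file): with the graphs above, the
    # shortest E -> F route is E -> G -> F, total weight 2 + 1 = 3, beating the
    # four-edge route E -> B -> C -> D -> F of weight 4.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3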
    doctest.testmod()
| 402 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change of `equation` over [a, b] guarantees a root.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
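    # Both calls below bracket the positive root of 10 - x*x, so each printed
    # value is approximately sqrt(10) ~ 3.16 (within the 0.01 interval tolerance).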
print(bisection(-2, 5))
    print(bisection(0, 6))
| 402 | 1 |