| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82 to 53.2k | int64 0 to 721 | stringlengths 91 to 41.9k | int64 0 to 699 | int64 0 to 1 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
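# Illustration (added): with Python >= 3.10 a field can use the PEP 604 union syntax directly.
# A hypothetical dataclass, equivalent to spelling the annotations with Optional[...]:
#
#     @dataclass
#     class Pep604Example:
#         foo: int | None = None      # handled the same way as Optional[int]
#         bar: str | None = "toto"
#
# The `is_python_no_less_than_3_10` flag above guards the 3.10-only dataclasses defined later in
# this file, because the `X | None` annotation is not supported on older interpreters.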
def list_field(default=None, metadata=None):
    """Dataclass helper: a list-valued field whose (mutable) default is supplied via default_factory."""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class _a :
a_ : List[Any] = 42
a_ : Tuple = 42
a_ : Dict = 42
a_ : List[Any] = 42
@dataclass
class _a :
a_ : Any = 42
a_ : Any = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class _a :
a_ : Dict = False
a_ : Union[str, Any] = True
a_ : Any = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class _a :
a_ : Optional[Any] = 'toto'
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BasicEnum(self.foo )
@dataclass
class _a :
a_ : Any = 'toto'
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = MixedTypeEnum(self.foo )
@dataclass
class _a :
a_ : List[Any] = None
a_ : Tuple = field(default=__snake_case , metadata={'help': 'help message'} )
a_ : Any = None
a_ : List[str] = list_field(default=[] )
a_ : Any = list_field(default=[] )
@dataclass
class _a :
a_ : Any = list_field(default=[] )
a_ : int = list_field(default=[1, 2, 3] )
a_ : Union[str, Any] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
a_ : str = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _a :
a_ : Optional[int] = field()
a_ : Tuple = field()
a_ : Any = field()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BasicEnum(self.required_enum )
@dataclass
class _a :
a_ : List[str] = 42
a_ : str = field()
a_ : Any = None
a_ : Optional[int] = field(default='toto' , metadata={'help': 'help message'} )
a_ : Union[str, Any] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class _a :
a_ : Dict = False
a_ : Dict = True
a_ : List[Any] = None
@dataclass
class _a :
a_ : str = None
a_ : int = field(default=__snake_case , metadata={'help': 'help message'} )
a_ : Optional[int] = None
a_ : Union[str, Any] = list_field(default=[] )
a_ : List[str] = list_field(default=[] )
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCamelCase__ = {k: v for k, v in vars(SCREAMING_SNAKE_CASE__ ).items() if k != 'container'}
lowerCamelCase__ = {k: v for k, v in vars(SCREAMING_SNAKE_CASE__ ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , SCREAMING_SNAKE_CASE__ ) and yy.get('choices' , SCREAMING_SNAKE_CASE__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](SCREAMING_SNAKE_CASE__ ) , yy['type'](SCREAMING_SNAKE_CASE__ ) )
del xx["type"], yy["type"]
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
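        # Illustration (added, approximate sketch of the imported helper): `make_choice_type_function`
        # builds a converter that maps the command-line string back to the original, possibly
        # non-string, choice. Roughly:
        #
        #     str_to_choice = {str(choice): choice for choice in choices}
        #     converter = lambda arg: str_to_choice.get(arg, arg)
        #
        # so converter("42") returns the int 42 when 42 is a declared choice, which is why the method
        # above compares the two parsers through their "type" callables rather than comparing the
        # callables themselves.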
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--bar' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--baz' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--flag' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , const=SCREAMING_SNAKE_CASE__ , nargs='?' )
self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((lowerCamelCase__ ) , ) = parser.parse_args_into_dataclasses(SCREAMING_SNAKE_CASE__ , look_for_args_file=SCREAMING_SNAKE_CASE__ )
self.assertFalse(example.flag )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--baz' , default='toto' , type=SCREAMING_SNAKE_CASE__ , help='help message' )
self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , const=SCREAMING_SNAKE_CASE__ , nargs='?' )
expected.add_argument('--baz' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , const=SCREAMING_SNAKE_CASE__ , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=SCREAMING_SNAKE_CASE__ , dest='baz' )
expected.add_argument('--opt' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(SCREAMING_SNAKE_CASE__ )
for dataclass_type in dataclass_types:
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = parser.parse_args([] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , opt=SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , opt=SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , opt=SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , opt=SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , opt=SCREAMING_SNAKE_CASE__ ) )
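        # Illustration (added, approximate sketch of the imported helper): `string_to_bool` is what makes
        # the "--foo True" / "--foo False" forms above work alongside the bare "--foo" flag. It accepts
        # the usual spellings, roughly:
        #
        #     string_to_bool("yes") -> True      string_to_bool("false") -> False
        #     string_to_bool("1")   -> True      string_to_bool("0")     -> False
        #
        # and raises an argparse error for anything it does not recognise.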
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
lowerCamelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCamelCase__ = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
lowerCamelCase__ = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCamelCase__ = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
lowerCamelCase__ = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _UpperCamelCase ( self : Any ):
@dataclass
class _a :
a_ : Union[str, Any] = 'toto'
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
lowerCamelCase__ = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
lowerCamelCase__ = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=SCREAMING_SNAKE_CASE__ )
self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = parser.parse_args([] )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCamelCase__ = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--bar' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='help message' )
expected.add_argument('--baz' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--ces' , nargs='+' , default=[] , type=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--des' , nargs='+' , default=[] , type=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(SCREAMING_SNAKE_CASE__ )
for dataclass_type in dataclass_types:
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = parser.parse_args([] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , bar=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , ces=[] , des=[] ) )
lowerCamelCase__ = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--required_str' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=SCREAMING_SNAKE_CASE__ , )
self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=SCREAMING_SNAKE_CASE__ , )
expected.add_argument('--opt' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ )
expected.add_argument('--baz' , default='toto' , type=SCREAMING_SNAKE_CASE__ , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=SCREAMING_SNAKE_CASE__ )
self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
lowerCamelCase__ = parser.parse_dict(SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = BasicExample(**SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(SCREAMING_SNAKE_CASE__ , parser.parse_dict , SCREAMING_SNAKE_CASE__ , allow_extra_keys=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'temp_json' )
os.mkdir(SCREAMING_SNAKE_CASE__ )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
lowerCamelCase__ = BasicExample(**SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'temp_yaml' )
os.mkdir(SCREAMING_SNAKE_CASE__ )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
lowerCamelCase__ = BasicExample(**SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
| 510 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 585 | 0 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    # Relax every edge (vertex_count - 1) times, then check once more for a negative cycle.
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
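# Illustration (added): a small worked example for the function above. For the directed graph
# 0 -(1)-> 1 -(2)-> 2 with source vertex 0, the shortest distances are [0.0, 1.0, 3.0]:
#
#     example_graph = [
#         {"src": 0, "dst": 1, "weight": 1},
#         {"src": 1, "dst": 2, "weight": 2},
#     ]
#     assert bellman_ford(example_graph, vertex_count=3, edge_count=2, src=0) == [0.0, 1.0, 3.0]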
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 641 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
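    # Note (added): in the upstream OpenAI human-eval execution module this guard also nulls out
    # destructive helpers on `builtins`, `os`, `shutil`, `subprocess` and `sys` (process killing,
    # file removal, directory changes, subprocess creation, and a few importable modules); that is
    # what the plain `= None` assignments grouped under the imports below correspond to.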
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None | 641 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _snake_case :
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int]=13 ,SCREAMING_SNAKE_CASE__ : Tuple=7 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=True ,SCREAMING_SNAKE_CASE__ : int=99 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=64 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=32 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5 ,SCREAMING_SNAKE_CASE__ : Optional[int]=4 ,SCREAMING_SNAKE_CASE__ : List[Any]=37 ,SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : str=512 ,SCREAMING_SNAKE_CASE__ : Any=16 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : Dict=0.02 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 ,SCREAMING_SNAKE_CASE__ : int=None ,):
SCREAMING_SNAKE_CASE:Optional[Any] = parent
SCREAMING_SNAKE_CASE:List[Any] = batch_size
SCREAMING_SNAKE_CASE:str = seq_length
SCREAMING_SNAKE_CASE:str = is_training
SCREAMING_SNAKE_CASE:Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE:Optional[int] = use_token_type_ids
SCREAMING_SNAKE_CASE:Optional[Any] = use_labels
SCREAMING_SNAKE_CASE:Dict = vocab_size
SCREAMING_SNAKE_CASE:int = hidden_size
SCREAMING_SNAKE_CASE:List[Any] = embedding_size
SCREAMING_SNAKE_CASE:Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE:Any = num_attention_heads
SCREAMING_SNAKE_CASE:Any = intermediate_size
SCREAMING_SNAKE_CASE:List[Any] = hidden_act
SCREAMING_SNAKE_CASE:Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE:Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE:List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE:Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE:int = type_sequence_label_size
SCREAMING_SNAKE_CASE:Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE:Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE:Any = num_choices
SCREAMING_SNAKE_CASE:str = scope
def __UpperCamelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE:Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE:List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE:List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE:List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE:Tuple = None
SCREAMING_SNAKE_CASE:Optional[int] = None
SCREAMING_SNAKE_CASE:int = None
if self.use_labels:
SCREAMING_SNAKE_CASE:Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE:Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE:Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE:Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Union[str, Any] ):
return MegatronBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,)
def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ):
SCREAMING_SNAKE_CASE:Tuple = MegatronBertModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Tuple = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = model(SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE:Optional[Any] = MegatronBertForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Any = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ):
SCREAMING_SNAKE_CASE:List[Any] = MegatronBertForCausalLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Tuple = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = MegatronBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Dict = model(
SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Dict ):
SCREAMING_SNAKE_CASE:Optional[int] = MegatronBertForPreTraining(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Tuple = model(
SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,next_sentence_label=SCREAMING_SNAKE_CASE__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ):
SCREAMING_SNAKE_CASE:Optional[Any] = MegatronBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:int = model(
SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,start_positions=SCREAMING_SNAKE_CASE__ ,end_positions=SCREAMING_SNAKE_CASE__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE:Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE:Tuple = MegatronBertForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Any = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[str] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE:Optional[int] = MegatronBertForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Optional[int] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict ):
SCREAMING_SNAKE_CASE:List[str] = self.num_choices
SCREAMING_SNAKE_CASE:Dict = MegatronBertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:List[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE:Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE:Optional[int] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE:List[str] = model(
SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:Optional[int] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
):Any = config_and_inputs
SCREAMING_SNAKE_CASE:Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( _a , _a , unittest.TestCase ):
_A : Any = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_A : List[str] = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : str = True
# test_resize_embeddings = False
_A : List[Any] = False
def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Tuple=False ):
SCREAMING_SNAKE_CASE:Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE:List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def __UpperCamelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE:Optional[int] = MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE:Dict = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,hidden_size=37 )
def __UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def A_ ( snake_case ):
return torch.tensor(
snake_case , dtype=torch.long , device=snake_case , )
A_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
@unittest.skip("Model is not available." )
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:List[str] = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE:List[Any] = os.path.join(os.environ["MYDIR"] ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = MegatronBertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.half()
SCREAMING_SNAKE_CASE:str = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE:Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )[0]
SCREAMING_SNAKE_CASE:Optional[Any] = torch.Size((1, 9, 1_024) )
self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE:Dict = output[0, ii, jj]
SCREAMING_SNAKE_CASE:int = expected[3 * ii + jj]
SCREAMING_SNAKE_CASE:Tuple = "ii={} jj={} a={} b={}".format(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertTrue(math.isclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,rel_tol=SCREAMING_SNAKE_CASE__ ,abs_tol=SCREAMING_SNAKE_CASE__ ) ,msg=SCREAMING_SNAKE_CASE__ )
| 143 |
'''simple docstring'''
A_ = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset([])
A_ = frozenset(["image"])
A_ = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["image"])
A_ = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
A_ = frozenset(["prompt", "image", "negative_prompt"])
A_ = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
A_ = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
A_ = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["image", "mask_image"])
A_ = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["example_image", "image", "mask_image"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset(["input_tokens"])
A_ = frozenset(["input_tokens"])
| 143 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_snake_case : Any = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 719 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 493 | 0 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCamelCase_ : int = get_logger(__name__)
class __lowerCAmelCase :
"""simple docstring"""
snake_case = "dummy_data"
snake_case = "datasets"
snake_case = False
def __init__( self : Optional[Any] , _snake_case : str , _snake_case : str , _snake_case : Union[Version, str] , _snake_case : Optional[str] = None , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[List[Callable]] = None , ) -> List[str]:
"""simple docstring"""
A_ = 0
A_ = dataset_name
A_ = cache_dir
A_ = use_local_dummy_data
A_ = config
# download_callbacks take a single url as input
A_ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
A_ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
A_ = str(_snake_case )
# to be downloaded
A_ = None
A_ = None
@property
def lowerCamelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
if self._dummy_file is None:
A_ = self.download_dummy_data()
return self._dummy_file
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def lowerCamelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
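    # Illustration (added, hypothetical values): for a dataset with config name "plain_text" and
    # version "1.0.0", the two properties above resolve to
    #     dummy_data_folder -> "dummy/plain_text/1.0.0"
    #     dummy_zip_file    -> "dummy/plain_text/1.0.0/dummy_data.zip"
    # and, when no config is set, to "dummy/1.0.0" and "dummy/1.0.0/dummy_data.zip".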
def lowerCamelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
A_ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
A_ = cached_path(
_snake_case , cache_dir=self.cache_dir , extract_compressed_file=_snake_case , force_extract=_snake_case )
return os.path.join(_snake_case , self.dummy_file_name )
@property
def lowerCamelCase__ ( self : Any ) -> Any:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowerCamelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
if self._bucket_url is None:
A_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def lowerCamelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def lowerCamelCase__ ( self : Any , _snake_case : Optional[int] , *_snake_case : int ) -> Tuple:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
A_ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
A_ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_snake_case , _snake_case ):
return self.create_dummy_data_dict(_snake_case , _snake_case )
elif isinstance(_snake_case , (list, tuple) ):
return self.create_dummy_data_list(_snake_case , _snake_case )
else:
return self.create_dummy_data_single(_snake_case , _snake_case )
def lowerCamelCase__ ( self : Optional[Any] , _snake_case : str , *_snake_case : List[str] ) -> str:
"""simple docstring"""
return self.download_and_extract(_snake_case )
def lowerCamelCase__ ( self : str , _snake_case : Dict , _snake_case : List[Any] ) -> int:
"""simple docstring"""
return self.download_and_extract(_snake_case )
def lowerCamelCase__ ( self : Optional[int] , _snake_case : Dict , *_snake_case : List[str] , **_snake_case : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return path
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return {}
def lowerCamelCase__ ( self : str , _snake_case : Union[str, Any] , _snake_case : List[Any] ) -> Any:
"""simple docstring"""
A_ = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_snake_case , _snake_case ):
for single_url in single_urls:
download_callback(_snake_case )
else:
A_ = single_urls
download_callback(_snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_snake_case , _snake_case ):
A_ = [os.path.join(_snake_case , urllib.parse.quote_plus(Path(_snake_case ).name ) ) for x in single_urls]
else:
A_ = single_urls
A_ = os.path.join(_snake_case , urllib.parse.quote_plus(Path(_snake_case ).name ) )
A_ = value
# make sure that values are unique
if all(isinstance(_snake_case , _snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
A_ = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
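    # Illustration (added, hypothetical values): for data_url = {"train": "https://host/data/train.csv?dl=1"},
    # the method above returns {"train": os.path.join(path_to_dummy_data, "train.csv%3Fdl%3D1")},
    # i.e. the last path component of each URL, quote_plus-encoded, resolved inside the dummy-data folder.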
def lowerCamelCase__ ( self : Dict , _snake_case : List[str] , _snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
A_ = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
A_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , _snake_case ) ) for url in data_url )
A_ = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
A_ = [data_url[0]] * len(_snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
A_ = os.path.join(_snake_case , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(_snake_case )
return dummy_data_list
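    # Illustration (added, hypothetical values): for sharded URLs such as
    # ["corpus-00001-of-00300", "corpus-00002-of-00300"], the shard check above rewrites every entry
    # to the first one, so the returned list repeats a single dummy path instead of expecting one
    # dummy file per shard.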
def lowerCamelCase__ ( self : int , _snake_case : Dict , _snake_case : Optional[Any] ) -> int:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
A_ = os.path.join(_snake_case , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(_snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCamelCase__ ( self : List[str] , _snake_case : Dict ) -> str:
"""simple docstring"""
def _iter_archive_members(_snake_case : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
A_ = Path(self.dummy_file ).parent
A_ = path.relative_to(_snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
A_ = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_snake_case )
A_ = Path(_snake_case )
A_ = _iter_archive_members(_snake_case ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(_snake_case ).as_posix(), file_path.open("rb" )
def lowerCamelCase__ ( self : Optional[Any] , _snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ):
A_ = [paths]
for path in paths:
if os.path.isfile(_snake_case ):
if os.path.basename(_snake_case ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_snake_case ):
if os.path.basename(_snake_case ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(_snake_case ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(_snake_case , _snake_case )
| 115 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , _snake_case : Any , _snake_case : Union[str, Any]=99 , _snake_case : List[Any]=13 , _snake_case : Optional[Any]=7 , _snake_case : Union[str, Any]=9 , _snake_case : List[Any]=True , _snake_case : Optional[int]=True , _snake_case : Any=False , _snake_case : str=32 , _snake_case : Any=5 , _snake_case : List[str]=4 , _snake_case : Dict=37 , _snake_case : List[Any]=8 , _snake_case : int=0.1 , _snake_case : List[str]=0.0_0_2 , _snake_case : Optional[Any]=1 , _snake_case : Union[str, Any]=0 , _snake_case : Any=0 , _snake_case : List[str]=None , _snake_case : List[Any]=None , ) -> int:
"""simple docstring"""
A_ = parent
A_ = batch_size
A_ = encoder_seq_length
A_ = decoder_seq_length
# For common tests
A_ = self.decoder_seq_length
A_ = is_training
A_ = use_attention_mask
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = d_ff
A_ = relative_attention_num_buckets
A_ = dropout_rate
A_ = initializer_factor
A_ = eos_token_id
A_ = pad_token_id
A_ = decoder_start_token_id
A_ = None
A_ = decoder_layers
def lowerCamelCase__ ( self : Any ) -> Any:
"""simple docstring"""
return TaConfig.from_pretrained("google/umt5-base" )
def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : int , _snake_case : Dict=None , _snake_case : Dict=None , _snake_case : str=None , _snake_case : Any=None , _snake_case : Union[str, Any]=None , ) -> List[Any]:
"""simple docstring"""
if attention_mask is None:
A_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
A_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
A_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_snake_case )
if decoder_head_mask is None:
A_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_snake_case )
if cross_attn_head_mask is None:
A_ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCamelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
A_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
A_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
A_ = input_ids.clamp(self.pad_token_id + 1 )
A_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
A_ = self.get_config()
A_ = config.num_attention_heads
A_ = self.prepare_inputs_dict(_snake_case , _snake_case , _snake_case )
return config, input_dict
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
A_ , A_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase__ ( self : Optional[int] , _snake_case : List[str] , _snake_case : Any , _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Optional[Any] , ) -> List[Any]:
"""simple docstring"""
A_ = UMTaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A_ = model(
input_ids=_snake_case , decoder_input_ids=_snake_case , attention_mask=_snake_case , decoder_attention_mask=_snake_case , )
A_ = model(input_ids=_snake_case , decoder_input_ids=_snake_case )
A_ = result.last_hidden_state
A_ = result.past_key_values
A_ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_snake_case ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCamelCase__ ( self : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : List[str] , _snake_case : str , _snake_case : Optional[Any] , _snake_case : int , ) -> List[str]:
"""simple docstring"""
A_ = UMTaModel(config=_snake_case ).get_decoder().to(_snake_case ).eval()
# first forward pass
A_ = model(_snake_case , use_cache=_snake_case )
A_ = model(_snake_case )
A_ = model(_snake_case , use_cache=_snake_case )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) + 1 )
A_ , A_ = outputs.to_tuple()
        # create a hypothetical next token and extend input_ids with it
A_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
A_ = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ = model(_snake_case )["last_hidden_state"]
A_ = model(_snake_case , past_key_values=_snake_case )["last_hidden_state"]
# select random slice
A_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ = output_from_no_past[:, -1, random_slice_idx].detach()
A_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-3 ) )
def lowerCamelCase__ ( self : str , _snake_case : Any , _snake_case : str , ) -> List[Any]:
"""simple docstring"""
A_ = UMTaModel(config=_snake_case ).to(_snake_case ).half().eval()
A_ = model(**_snake_case )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_snake_case ).any().item() )
@require_torch
class __lowerCAmelCase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
snake_case = (UMTaForConditionalGeneration,) if is_torch_available() else ()
snake_case = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
snake_case = True
snake_case = False
snake_case = False
snake_case = True
snake_case = True
# The small UMT5 model needs higher percentages for CPU/MP tests
snake_case = [0.8, 0.9]
def lowerCamelCase__ ( self : str ) -> Tuple:
"""simple docstring"""
A_ = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
A_ = UMTaModel(config_and_inputs[0] ).to(_snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=_snake_case , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_snake_case )
def lowerCamelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
A_ = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
A_ = self.model_tester.prepare_config_and_inputs()
A_ = config_and_inputs[0]
A_ = UMTaForConditionalGeneration(_snake_case ).eval()
model.to(_snake_case )
A_ = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=_snake_case ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ),
}
for attn_name, (name, mask) in zip(_snake_case , head_masking.items() ):
A_ = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
A_ = torch.ones(
config.num_decoder_layers , config.num_heads , device=_snake_case )
A_ = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=_snake_case , return_dict_in_generate=_snake_case , **_snake_case , )
# We check the state of decoder_attentions and cross_attentions just from the last step
A_ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def lowerCamelCase__ ( self : Dict ) -> int:
"""simple docstring"""
A_ = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=_snake_case ).to(_snake_case )
A_ = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=_snake_case , legacy=_snake_case )
A_ = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
A_ = tokenizer(_snake_case , return_tensors="pt" , padding=_snake_case ).input_ids
# fmt: off
A_ = torch.tensor(
[
            [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 826, 321, 671, 25_922, 256_299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333, 61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_snake_case , _snake_case )
A_ = model.generate(input_ids.to(_snake_case ) )
A_ = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
A_ = tokenizer.batch_decode(_snake_case )
self.assertEqual(_snake_case , _snake_case )
| 115 | 1 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
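# Illustrative behaviour (a sketch, not exercised by the CLI above): for a composite such as
# 8051 = 83 * 97, ``pollard_rho(8051)`` is expected to return one of the two prime factors,
# while a prime input returns ``None`` once every attempt is exhausted, e.g.
# ``pollard_rho(13, attempts=1) is None``.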
| 714 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __A ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=None , __magic_name__=True , __magic_name__=True , __magic_name__=None , ):
lowerCamelCase__ : Optional[Any] = size if size is not None else {"""height""": 20, """width""": 20}
lowerCamelCase__ : List[str] = parent
lowerCamelCase__ : str = batch_size
lowerCamelCase__ : List[str] = num_channels
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : Union[str, Any] = min_resolution
lowerCamelCase__ : List[Any] = max_resolution
lowerCamelCase__ : Optional[int] = size
lowerCamelCase__ : List[str] = do_normalize
lowerCamelCase__ : str = do_convert_rgb
lowerCamelCase__ : str = [512, 1024, 2048, 4096]
lowerCamelCase__ : Any = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
def _snake_case (self ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _snake_case (self ):
lowerCamelCase__ : List[str] = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
lowerCamelCase__ : int = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class __A ( A_ , unittest.TestCase ):
UpperCamelCase :Union[str, Any] = PixaStructImageProcessor if is_vision_available() else None
def _snake_case (self ):
lowerCamelCase__ : List[str] = PixaStructImageProcessingTester(self )
@property
def _snake_case (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case (self ):
lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def _snake_case (self ):
lowerCamelCase__ : Tuple = self.image_processor_tester.prepare_dummy_image()
lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
lowerCamelCase__ : List[str] = 2048
lowerCamelCase__ : Any = image_processor(__magic_name__ , return_tensors="""pt""" , max_patches=__magic_name__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1E-3 , rtol=1E-3 ) )
def _snake_case (self ):
# Initialize image_processor
lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[Any] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
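        # The "+ 2" above accounts for the two extra features Pix2Struct prepends to every
        # flattened patch: the patch's row index and its column index.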
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processor(
__magic_name__ , return_tensors="""pt""" , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _snake_case (self ):
# Initialize image_processor
lowerCamelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase__ : List[Any] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
lowerCamelCase__ : List[str] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__magic_name__ ):
lowerCamelCase__ : Tuple = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__magic_name__ ).flattened_patches
lowerCamelCase__ : Optional[Any] = """Hello"""
lowerCamelCase__ : Dict = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__magic_name__ , header_text=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCamelCase__ : Tuple = image_processor(
__magic_name__ , return_tensors="""pt""" , max_patches=__magic_name__ , header_text=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _snake_case (self ):
# Initialize image_processor
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
lowerCamelCase__ : Optional[int] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase__ : List[str] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCamelCase__ : List[str] = image_processor(
__magic_name__ , return_tensors="""pt""" , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _snake_case (self ):
# Initialize image_processor
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCamelCase__ : List[str] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase__ : Tuple = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processor(
__magic_name__ , return_tensors="""pt""" , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class __A ( A_ , unittest.TestCase ):
UpperCamelCase :Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def _snake_case (self ):
lowerCamelCase__ : Optional[Any] = PixaStructImageProcessingTester(self , num_channels=4 )
lowerCamelCase__ : Optional[int] = 3
@property
def _snake_case (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case (self ):
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def _snake_case (self ):
# Initialize image_processor
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase__ : List[str] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCamelCase__ : str = image_processor(
__magic_name__ , return_tensors="""pt""" , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 96 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class a__ ( enum.Enum ):
a : Any = 0
a : Dict = 1
a : List[Any] = 2
@add_end_docstrings(UpperCamelCase__ )
class a__ ( UpperCamelCase__ ):
a : Tuple = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self , *A , **A ) -> str:
'''simple docstring'''
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
a = None
if self.model.config.prefix is not None:
a = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
a = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
a , a , a = self._sanitize_parameters(prefix=A , **self._forward_params )
a = {**self._preprocess_params, **preprocess_params}
a = {**self._forward_params, **forward_params}
def lowerCAmelCase_ ( self , A=None , A=None , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> List[Any]:
'''simple docstring'''
a = {}
if prefix is not None:
a = prefix
if prefix:
a = self.tokenizer(
A , padding=A , add_special_tokens=A , return_tensors=self.framework )
a = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
a = handle_long_generation
preprocess_params.update(A )
a = generate_kwargs
a = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
a = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
a = ReturnType.TENSORS
if return_type is not None:
a = return_type
if clean_up_tokenization_spaces is not None:
a = clean_up_tokenization_spaces
if stop_sequence is not None:
a = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
a = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase_ ( self , *A , **A ) -> Optional[Any]:
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*A , **A )
def __call__( self , A , **A ) -> List[str]:
'''simple docstring'''
return super().__call__(A , **A )
def lowerCAmelCase_ ( self , A , A="" , A=None , **A ) -> Optional[int]:
'''simple docstring'''
a = self.tokenizer(
prefix + prompt_text , padding=A , add_special_tokens=A , return_tensors=self.framework )
a = prompt_text
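        # "hole" handling below truncates the prompt from the left when the prompt plus the
        # requested new tokens would not fit into the model's maximum length, so that
        # generation can still proceed on the most recent part of the prompt.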
if handle_long_generation == "hole":
a = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
a = generate_kwargs["max_new_tokens"]
else:
a = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
a = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"
                        " model's max length" )
a = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
a = inputs["attention_mask"][:, -keep_length:]
return inputs
def lowerCAmelCase_ ( self , A , **A ) -> List[str]:
'''simple docstring'''
a = model_inputs["input_ids"]
a = model_inputs.get("attention_mask" , A )
# Allow empty prompts
if input_ids.shape[1] == 0:
a = None
a = None
a = 1
else:
a = input_ids.shape[0]
a = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
a = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
a = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
a = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
a = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
a = self.model.generate(input_ids=A , attention_mask=A , **A )
a = generated_sequence.shape[0]
if self.framework == "pt":
a = generated_sequence.reshape(A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
a = tf.reshape(A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase_ ( self , A , A=ReturnType.FULL_TEXT , A=True ) -> Union[str, Any]:
'''simple docstring'''
a = model_outputs["generated_sequence"][0]
a = model_outputs["input_ids"]
a = model_outputs["prompt_text"]
a = generated_sequence.numpy().tolist()
a = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
a = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
a = self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
a = 0
else:
a = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=A , clean_up_tokenization_spaces=A , ) )
if return_type == ReturnType.FULL_TEXT:
a = prompt_text + text[prompt_length:]
else:
a = text[prompt_length:]
a = {"generated_text": all_text}
records.append(A )
return records
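# Minimal usage sketch (the model name and prompt are illustrative placeholders, not taken
# from this file):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     generator("Once upon a time", max_new_tokens=20, return_full_text=False)
#
# ``max_new_tokens`` is forwarded to ``generate`` and ``return_full_text=False`` resolves to
# ``ReturnType.NEW_TEXT`` during parameter sanitization above.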
| 515 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ) -> List[str]:
a = bnb_quantization_config.load_in_abit
a = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed.")
if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
a = []
# custom device map
if isinstance(__UpperCamelCase , __UpperCamelCase) and len(device_map.keys()) > 1:
a = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
a = get_keys_to_not_convert(__UpperCamelCase)
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__UpperCamelCase)
a = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
a = []
a = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__UpperCamelCase)
# compatibility with peft
a = load_in_abit
a = load_in_abit
a = get_parameter_device(__UpperCamelCase)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
a = replace_with_bnb_layers(__UpperCamelCase , __UpperCamelCase , modules_to_not_convert=__UpperCamelCase)
# convert param to the right dtype
a = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
a = name.replace(".weight" , "").replace(".bias" , "")
a = getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(__UpperCamelCase):
param.to(__UpperCamelCase)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
            " We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
a = replace_with_bnb_layers(
__UpperCamelCase , __UpperCamelCase , modules_to_not_convert=__UpperCamelCase)
a = get_quantized_model_device_map(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , max_memory=__UpperCamelCase , no_split_module_classes=__UpperCamelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
a = True
a = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__UpperCamelCase , offload_state_dict=__UpperCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__UpperCamelCase , device_map=__UpperCamelCase , offload_dir=__UpperCamelCase)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None) -> Any:
if device_map is None:
if torch.cuda.is_available():
a = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(__UpperCamelCase , __UpperCamelCase):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
a = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
a = {}
a = special_dtypes
a = no_split_module_classes
a = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
a = get_balanced_memory(
__UpperCamelCase , low_zero=(device_map == "balanced_low_0") , max_memory=__UpperCamelCase , **__UpperCamelCase , )
a = max_memory
a = infer_auto_device_map(__UpperCamelCase , **__UpperCamelCase)
if isinstance(__UpperCamelCase , __UpperCamelCase):
# check if don't have any quantized module on the cpu
a = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
a = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None) -> List[Any]:
if modules_to_not_convert is None:
a = []
a , a = _replace_with_bnb_layers(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ) -> List[str]:
a = False
for name, module in model.named_children():
if current_key_name is None:
a = []
current_key_name.append(__UpperCamelCase)
if isinstance(__UpperCamelCase , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
a = ".".join(__UpperCamelCase)
a = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
a = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
a = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__UpperCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
a = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False")
a = module.weight.data
if module.bias is not None:
a = module.bias.data
bnb_module.requires_grad_(__UpperCamelCase)
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
a = True
if len(list(module.children())) > 0:
a , a = _replace_with_bnb_layers(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
a = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> List[str]:
# Create a copy of the model
with init_empty_weights():
a = deepcopy(__UpperCamelCase) # this has 0 cost since it is done inside `init_empty_weights` context manager`
a = find_tied_parameters(__UpperCamelCase)
# For compatibility with Accelerate < 0.18
if isinstance(__UpperCamelCase , __UpperCamelCase):
a = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
a = sum(__UpperCamelCase , [])
a = len(__UpperCamelCase) > 0
# Check if it is a base model
a = False
if hasattr(__UpperCamelCase , "base_model_prefix"):
a = not hasattr(__UpperCamelCase , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a = list(model.named_children())
a = [list_modules[-1][0]]
# add last module together with tied weights
a = set(__UpperCamelCase) - set(__UpperCamelCase)
a = list(set(__UpperCamelCase)) + list(__UpperCamelCase)
# remove ".weight" from the keys
a = [".weight", ".bias"]
a = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a = name.replace(__UpperCamelCase , "")
filtered_module_names.append(__UpperCamelCase)
return filtered_module_names
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> Union[str, Any]:
    for m in __UpperCamelCase.modules():
if isinstance(__UpperCamelCase , bnb.nn.Linearabit):
return True
return False
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> List[Any]:
    return next(__UpperCamelCase.parameters()).device
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) -> Optional[Any]:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__UpperCamelCase , __UpperCamelCase , 0 , dtype=__UpperCamelCase , value=__UpperCamelCase)
a = param_name
a = model
if "." in tensor_name:
a = tensor_name.split(".")
for split in splits[:-1]:
a = getattr(__UpperCamelCase , __UpperCamelCase)
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''')
a = new_module
a = splits[-1]
# offload weights
a = False
offload_weight(module._parameters[tensor_name] , __UpperCamelCase , __UpperCamelCase , index=__UpperCamelCase)
if hasattr(module._parameters[tensor_name] , "SCB"):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB") , __UpperCamelCase , index=__UpperCamelCase , )
else:
offload_weight(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , index=__UpperCamelCase)
offload_weight(__UpperCamelCase , param_name.replace("weight" , "SCB") , __UpperCamelCase , index=__UpperCamelCase)
set_module_tensor_to_device(__UpperCamelCase , __UpperCamelCase , "meta" , dtype=__UpperCamelCase , value=torch.empty(*param.size()))
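# Usage sketch against the public ``accelerate`` API that these helpers correspond to (the
# model object and checkpoint path are placeholders):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     with init_empty_weights():
#         empty_model = MyModel(config)  # placeholder: any nn.Module built without allocating weights
#     quantized_model = load_and_quantize_model(
#         empty_model, bnb_quantization_config=bnb_config, weights_location="path/to/checkpoint"
#     )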
| 515 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ :str = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Optional[int] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Optional[Any] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
lowercase__ :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure) | 721 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
_A : torch.FloatTensor
class snake_case ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : Union[str, Any] , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Optional[int]=77 , __lowercase : Union[str, Any]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : List[Any] = attention_head_dim
__UpperCAmelCase : int = num_attention_heads * attention_head_dim
__UpperCAmelCase : List[str] = additional_embeddings
__UpperCAmelCase : Optional[int] = time_embed_dim or inner_dim
__UpperCAmelCase : Tuple = embedding_proj_dim or embedding_dim
__UpperCAmelCase : Dict = clip_embed_dim or embedding_dim
__UpperCAmelCase : Dict = Timesteps(__lowercase , __lowercase , 0 )
__UpperCAmelCase : List[str] = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase )
__UpperCAmelCase : Any = nn.Linear(__lowercase , __lowercase )
if embedding_proj_norm_type is None:
__UpperCAmelCase : Dict = None
elif embedding_proj_norm_type == "layer":
__UpperCAmelCase : Any = nn.LayerNorm(__lowercase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
__UpperCAmelCase : List[Any] = nn.Linear(__lowercase , __lowercase )
if encoder_hid_proj_type is None:
__UpperCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
__UpperCAmelCase : int = nn.Linear(__lowercase , __lowercase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
__UpperCAmelCase : str = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) )
if added_emb_type == "prd":
__UpperCAmelCase : Optional[int] = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) )
elif added_emb_type is None:
__UpperCAmelCase : str = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
__UpperCAmelCase : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(
__lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn='''gelu''' , attention_bias=__lowercase , )
for d in range(__lowercase )
] )
if norm_in_type == "layer":
__UpperCAmelCase : Optional[Any] = nn.LayerNorm(__lowercase )
elif norm_in_type is None:
__UpperCAmelCase : List[Any] = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
__UpperCAmelCase : str = nn.LayerNorm(__lowercase )
__UpperCAmelCase : List[Any] = nn.Linear(__lowercase , __lowercase )
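        # Additive causal mask: start from a matrix filled with -10000 and let ``triu_(1)`` zero
        # the diagonal and the lower triangle, so that only future positions remain masked once
        # the mask is added to the attention scores.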
__UpperCAmelCase : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
__UpperCAmelCase : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , __lowercase , persistent=__lowercase )
__UpperCAmelCase : Any = nn.Parameter(torch.zeros(1 , __lowercase ) )
__UpperCAmelCase : List[str] = nn.Parameter(torch.zeros(1 , __lowercase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A_ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = {}
def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ):
if hasattr(__lowercase , '''set_processor''' ):
__UpperCAmelCase : Optional[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , __lowercase , __lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__lowercase , __lowercase , __lowercase )
return processors
def A_ ( self : Any , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = len(self.attn_processors.keys() )
if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : int ):
if hasattr(__lowercase , '''set_processor''' ):
if not isinstance(__lowercase , __lowercase ):
module.set_processor(__lowercase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , __lowercase , __lowercase )
for name, module in self.named_children():
fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase )
def A_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def A_ ( self : Optional[int] , __lowercase : Dict , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = hidden_states.shape[0]
__UpperCAmelCase : Any = timestep
if not torch.is_tensor(__lowercase ):
__UpperCAmelCase : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0:
__UpperCAmelCase : Union[str, Any] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : List[str] = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device )
__UpperCAmelCase : List[str] = self.time_proj(__lowercase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__UpperCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
__UpperCAmelCase : List[str] = self.time_embedding(__lowercase )
if self.embedding_proj_norm is not None:
__UpperCAmelCase : Dict = self.embedding_proj_norm(__lowercase )
__UpperCAmelCase : Optional[int] = self.embedding_proj(__lowercase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__UpperCAmelCase : Dict = self.encoder_hidden_states_proj(__lowercase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
__UpperCAmelCase : int = self.proj_in(__lowercase )
__UpperCAmelCase : Tuple = self.positional_embedding.to(hidden_states.dtype )
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : str = 0
if encoder_hidden_states is not None:
additional_embeds.append(__lowercase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__UpperCAmelCase : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__UpperCAmelCase : Optional[int] = hidden_states[:, None, :]
__UpperCAmelCase : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__UpperCAmelCase : Dict = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 )
additional_embeds.append(__lowercase )
__UpperCAmelCase : Optional[Any] = torch.cat(
__lowercase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__UpperCAmelCase : Optional[int] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__UpperCAmelCase : List[str] = F.pad(
__lowercase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__UpperCAmelCase : Dict = hidden_states + positional_embeddings
if attention_mask is not None:
__UpperCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
__UpperCAmelCase : Optional[Any] = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 )
__UpperCAmelCase : Dict = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__UpperCAmelCase : Tuple = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__UpperCAmelCase : str = self.norm_in(__lowercase )
for block in self.transformer_blocks:
__UpperCAmelCase : Dict = block(__lowercase , attention_mask=__lowercase )
__UpperCAmelCase : Any = self.norm_out(__lowercase )
if self.prd_embedding is not None:
__UpperCAmelCase : List[Any] = hidden_states[:, -1]
else:
__UpperCAmelCase : List[str] = hidden_states[:, additional_embeddings_len:]
__UpperCAmelCase : Optional[int] = self.proj_to_clip_embeddings(__lowercase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__lowercase )
def A_ ( self : List[Any] , __lowercase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents | 374 | 0 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
__magic_name__ = logging.get_logger(__name__)
# General docstring
__magic_name__ = """RegNetConfig"""
# Base docstring
__magic_name__ = """facebook/regnet-y-040"""
__magic_name__ = [1, 1088, 7, 7]
# Image classification docstring
__magic_name__ = """facebook/regnet-y-040"""
__magic_name__ = """tabby, tabby cat"""
__magic_name__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , ):
super().__init__()
lowerCamelCase__ = nn.Convad(
UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , stride=UpperCAmelCase__ , padding=kernel_size // 2 , groups=UpperCAmelCase__ , bias=UpperCAmelCase__ , )
lowerCamelCase__ = nn.BatchNormad(UpperCAmelCase__ )
lowerCamelCase__ = ACTaFN[activation] if activation is not None else nn.Identity()
def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ):
lowerCamelCase__ = self.convolution(UpperCAmelCase__ )
lowerCamelCase__ = self.normalization(UpperCAmelCase__ )
lowerCamelCase__ = self.activation(UpperCAmelCase__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig ):
super().__init__()
lowerCamelCase__ = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowerCamelCase__ = config.num_channels
def __UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCamelCase__ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowerCamelCase__ = self.embedder(UpperCAmelCase__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 ):
super().__init__()
lowerCamelCase__ = nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , stride=UpperCAmelCase__ , bias=UpperCAmelCase__ )
lowerCamelCase__ = nn.BatchNormad(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Tensor ):
lowerCamelCase__ = self.convolution(UpperCAmelCase__ )
lowerCamelCase__ = self.normalization(UpperCAmelCase__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
super().__init__()
lowerCamelCase__ = nn.AdaptiveAvgPoolad((1, 1) )
lowerCamelCase__ = nn.Sequential(
nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 ) , nn.Sigmoid() , )
def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCamelCase__ = self.pooler(UpperCAmelCase__ )
lowerCamelCase__ = self.attention(UpperCAmelCase__ )
lowerCamelCase__ = hidden_state * attention
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 ):
super().__init__()
lowerCamelCase__ = in_channels != out_channels or stride != 1
lowerCamelCase__ = max(1 , out_channels // config.groups_width )
lowerCamelCase__ = (
RegNetShortCut(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
lowerCamelCase__ = nn.Sequential(
RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ ) , )
lowerCamelCase__ = ACTaFN[config.hidden_act]
def __UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCamelCase__ = hidden_state
lowerCamelCase__ = self.layer(UpperCAmelCase__ )
lowerCamelCase__ = self.shortcut(UpperCAmelCase__ )
hidden_state += residual
lowerCamelCase__ = self.activation(UpperCAmelCase__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 ):
super().__init__()
lowerCamelCase__ = in_channels != out_channels or stride != 1
lowerCamelCase__ = max(1 , out_channels // config.groups_width )
lowerCamelCase__ = (
RegNetShortCut(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
lowerCamelCase__ = nn.Sequential(
RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act ) , RegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ ) , )
lowerCamelCase__ = ACTaFN[config.hidden_act]
def __UpperCAmelCase ( self : Any , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCamelCase__ = hidden_state
lowerCamelCase__ = self.layer(UpperCAmelCase__ )
lowerCamelCase__ = self.shortcut(UpperCAmelCase__ )
hidden_state += residual
lowerCamelCase__ = self.activation(UpperCAmelCase__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , ):
super().__init__()
lowerCamelCase__ = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
lowerCamelCase__ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , ) , *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) for _ in range(depth - 1 )] , )
def __UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
lowerCamelCase__ = self.layers(UpperCAmelCase__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : int , SCREAMING_SNAKE_CASE_ : RegNetConfig ):
super().__init__()
lowerCamelCase__ = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowerCamelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(UpperCAmelCase__ , config.depths[1:] ):
self.stages.append(RegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ ) )
def __UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ):
lowerCamelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCamelCase__ = hidden_states + (hidden_state,)
lowerCamelCase__ = stage_module(UpperCAmelCase__ )
if output_hidden_states:
lowerCamelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case = RegNetConfig
snake_case = "regnet"
snake_case = "pixel_values"
snake_case = True
def __UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
if isinstance(UpperCAmelCase__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(UpperCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int]=False ):
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCamelCase__ = value
__magic_name__ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__magic_name__ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowercase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] ):
super().__init__(UpperCAmelCase__ )
lowerCamelCase__ = config
lowerCamelCase__ = RegNetEmbeddings(UpperCAmelCase__ )
lowerCamelCase__ = RegNetEncoder(UpperCAmelCase__ )
lowerCamelCase__ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None ):
lowerCamelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ = self.embedder(UpperCAmelCase__ )
lowerCamelCase__ = self.encoder(
UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
lowerCamelCase__ = encoder_outputs[0]
lowerCamelCase__ = self.pooler(UpperCAmelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowercase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Any ):
super().__init__(UpperCAmelCase__ )
lowerCamelCase__ = config.num_labels
lowerCamelCase__ = RegNetModel(UpperCAmelCase__ )
# classification head
lowerCamelCase__ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , ):
lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ = self.regnet(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
lowerCamelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCamelCase__ = self.classifier(UpperCAmelCase__ )
lowerCamelCase__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCamelCase__ = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCamelCase__ = '''single_label_classification'''
else:
lowerCamelCase__ = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowerCamelCase__ = MSELoss()
if self.num_labels == 1:
lowerCamelCase__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCamelCase__ = loss_fct(UpperCAmelCase__ , UpperCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
lowerCamelCase__ = CrossEntropyLoss()
lowerCamelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCamelCase__ = BCEWithLogitsLoss()
lowerCamelCase__ = loss_fct(UpperCAmelCase__ , UpperCAmelCase__ )
if not return_dict:
lowerCamelCase__ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
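# A minimal usage sketch, added for illustration only: it assumes the public
# "facebook/regnet-y-040" checkpoint referenced in the docstrings above is reachable and
# that its paired image processor is used. It is not part of the original module.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, RegNetForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # Print the ImageNet class with the highest score.
    print(model.config.id2label[logits.argmax(-1).item()])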
| 129 |
'''simple docstring'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__( self, img, dst_width: int, dst_height: int ):
        '''simple docstring'''
        if dst_width < 0 or dst_height < 0:
            raise ValueError('''Destination width/height should be > 0''' )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8 ) * 255
    def process( self ):
        '''simple docstring'''
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x( self, x: int ) -> int:
        '''simple docstring'''
        return int(self.ratio_x * x )
    def get_y( self, y: int ) -> int:
        '''simple docstring'''
        return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("""image_data/lena.jpg""", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
    )
    waitKey(0)
    destroyAllWindows()
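# For comparison, a vectorized nearest-neighbour resize without explicit Python loops.
# This is an illustrative addition (not part of the original file); it relies only on the
# NumPy import above and performs the same int(ratio * index) source-pixel lookup.
def resize_nearest(img, dst_w: int, dst_h: int):
    xs = (np.arange(dst_w) * img.shape[1] / dst_w).astype(int)  # source column per output column
    ys = (np.arange(dst_h) * img.shape[0] / dst_h).astype(int)  # source row per output row
    return img[ys[:, None], xs[None, :]]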
| 92 | 0 |
"""simple docstring"""
__magic_name__ = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.35_5818,
}
def _A ( from_type , to_type , value ):
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        lowerCamelCase__ = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(lowerCamelCase__ )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
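    # Two worked conversions, added for illustration; both follow directly from the table above.
    print(_A("joule", "kilojoule", 1000))  # 1.0
    print(_A("kilowatthour", "joule", 1))  # 3600000.0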
| 716 |
"""simple docstring"""
from itertools import count
def solution( __lowercase = 50 ):
    """simple docstring"""
    fill_count_functions = [1] * __lowercase
for n in count(__lowercase ):
fill_count_functions.append(1 )
for block_length in range(__lowercase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(F'{solution() = }')
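# Added note: fill_count_functions[n] is the fill-count function F(m, n) of Project Euler 115,
# the number of ways to place red blocks of length at least `__lowercase` (default 50) in a row
# of n units with at least one black square between blocks; solution() returns the first n for
# which that count exceeds one million.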
| 258 | 0 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
 | 592 | from math import isqrt, log10 as loga
def calculate_prime_numbers( max_number ) ->list[int]:
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( base = 800_800 , degree = 800_800 ) ->int:
    '''simple docstring'''
    upper_bound = degree * loga(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * loga(prime_numbers[left] )
            + prime_numbers[left] * loga(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 547 | 0 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = PriorTransformer
snake_case = "hidden_states"
@property
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : str = 4
A_ : List[str] = 8
A_ : Any = 7
A_ : Optional[Any] = floats_tensor((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = floats_tensor((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
A_ : List[str] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _snake_case ( self , _SCREAMING_SNAKE_CASE=0 )->List[Any]:
'''simple docstring'''
torch.manual_seed(_SCREAMING_SNAKE_CASE )
A_ : Dict = 4
A_ : int = 8
A_ : Optional[Any] = 7
A_ : str = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
A_ : List[str] = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
A_ : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _snake_case ( self )->List[Any]:
'''simple docstring'''
return (4, 8)
@property
def _snake_case ( self )->Tuple:
'''simple docstring'''
return (4, 8)
def _snake_case ( self )->str:
'''simple docstring'''
A_ : Any = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self )->int:
'''simple docstring'''
A_ , A_ : Any = PriorTransformer.from_pretrained(
'''hf-internal-testing/prior-dummy''' , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_SCREAMING_SNAKE_CASE )
A_ : List[Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ , A_ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
A_ : List[Any] = self.model_class(**_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Dict = [*signature.parameters.keys()]
A_ : str = ['''hidden_states''', '''timestep''']
self.assertListEqual(arg_names[:2] , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : List[str] = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
A_ : int = model.to(_SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , '''set_default_attn_processor''' ):
model.set_default_attn_processor()
A_ : Dict = self.get_dummy_seed_input()
with torch.no_grad():
A_ : str = model(**_SCREAMING_SNAKE_CASE )[0]
A_ : List[str] = output[0, :5].flatten().cpu()
print(_SCREAMING_SNAKE_CASE )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
        A_ : Dict = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1e-2 ) )
@slow
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=77 , _SCREAMING_SNAKE_CASE=0 )->int:
'''simple docstring'''
torch.manual_seed(_SCREAMING_SNAKE_CASE )
A_ : int = batch_size
A_ : List[str] = embedding_dim
A_ : Dict = num_embeddings
A_ : Any = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
A_ : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _snake_case ( self )->List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[Any]:
'''simple docstring'''
A_ : Dict = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
model.to(_SCREAMING_SNAKE_CASE )
A_ : str = self.get_dummy_seed_input(seed=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
A_ : int = model(**_SCREAMING_SNAKE_CASE )[0]
assert list(sample.shape ) == [1, 768]
A_ : Optional[Any] = sample[0, :8].flatten().cpu()
print(_SCREAMING_SNAKE_CASE )
A_ : Any = torch.tensor(_SCREAMING_SNAKE_CASE )
assert torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 )
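# A standalone sketch of the same forward pass outside the test harness, added for
# illustration; it assumes the Kandinsky prior checkpoint exercised above can be downloaded.
def run_prior_once():
    model = PriorTransformer.from_pretrained(
        "kandinsky-community/kandinsky-2-1-prior", subfolder="prior"
    )
    batch_size, embedding_dim, num_embeddings = 1, 768, 77
    with torch.no_grad():
        sample = model(
            hidden_states=torch.randn(batch_size, embedding_dim),
            timestep=2,
            proj_embedding=torch.randn(batch_size, embedding_dim),
            encoder_hidden_states=torch.randn(batch_size, num_embeddings, embedding_dim),
        )[0]
    return sample.shape  # expected: torch.Size([1, 768])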
| 152 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 152 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def __lowercase ( resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
    '''simple docstring'''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
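    # Worked example, added for illustration: a circuit with R = 3 ohm and X = 4 ohm
    # has |Z| = sqrt(3**2 + 4**2) = 5 ohm.
    assert __lowercase(3, 4, 0) == {"impedance": 5.0}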
| 330 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2 as cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A_ = True
except ImportError:
A_ = False
try:
from torch.hub import _get_torch_home
A_ = _get_torch_home()
except ImportError:
A_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
A_ = os.path.join(torch_cache_home, """transformers""")
A_ = """https://cdn.huggingface.co"""
A_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
A_ = os.path.join(PATH, """config.yaml""")
A_ = os.path.join(PATH, """attributes.txt""")
A_ = os.path.join(PATH, """objects.txt""")
A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
A_ = """pytorch_model.bin"""
A_ = """config.yaml"""
def lowercase ( objs=OBJECTS , attrs=ATTRIBUTES ):
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(''',''' )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(''',''' )[0].lower().strip() )
    return vg_classes, vg_attrs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = OrderedDict()
with open(lowerCAmelCase__ ,'''rb''' ) as f:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCamelCase_ = ckp.pop(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ )
else:
assert isinstance(lowerCAmelCase__ ,torch.tensor ), type(lowerCAmelCase__ )
lowerCamelCase_ = v
return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device )
lowerCamelCase_ = in_tensor.numpy()
lowerCamelCase_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = urlparse(lowerCAmelCase__ )
return parsed.scheme in ("http", "https")
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCamelCase_ = '''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,)
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
lowerCamelCase_ = requests.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
| 29 | 0 |
'''simple docstring'''
def move_tower( height: int , from_pole: str , to_pole: str , with_pole: str ) -> None:
    """simple docstring"""
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( from_pole: str , to_pole: str ) -> None:
    """simple docstring"""
    print("moving disk from" , from_pole , "to" , to_pole )
def main() -> None:
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = int(input("Height of hanoi: " ).strip() )
    move_tower(__SCREAMING_SNAKE_CASE , "A" , "B" , "C" )
if __name__ == "__main__":
main()
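# Added note: solving a tower of height n always takes 2**n - 1 single-disk moves,
# so move_tower(3, "A", "B", "C") prints exactly 7 lines.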
| 704 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case : Any = logging.get_logger(__name__)
_snake_case : Optional[Any] = {'vocab_file': 'vocab.json'}
_snake_case : Any = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_snake_case : Optional[int] = {'mgp-str': 27}
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, _a, _a="[GO]", _a="[GO]", _a="[s]", _a="[GO]", **_a ) -> Dict:
super().__init__(
unk_token=_a, bos_token=_a, eos_token=_a, pad_token=_a, **_a, )
with open(_a, encoding="utf-8" ) as vocab_handle:
__SCREAMING_SNAKE_CASE = json.load(_a )
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self ) -> List[str]:
return len(self.vocab )
def __lowerCAmelCase ( self ) -> Tuple:
return dict(self.vocab, **self.added_tokens_encoder )
    def __lowerCAmelCase ( self, _a ) -> list:
        char_tokens = []
        for s in _a:
            char_tokens.extend(s )
        return char_tokens
def __lowerCAmelCase ( self, _a ) -> List[Any]:
return self.vocab.get(_a, self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self, _a ) -> Tuple:
return self.decoder.get(_a )
def __lowerCAmelCase ( self, _a, _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error("Vocabulary path ({}) should be a directory".format(_a ) )
return
__SCREAMING_SNAKE_CASE = os.path.join(
_a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(_a, "w", encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab, indent=2, sort_keys=_a, ensure_ascii=_a ) + "\n" )
return (vocab_file,)
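# Added note (illustrative, not part of the original file): the tokenizer is purely
# character-level, so for an instance `tok` whose vocab.json covers the lower-case letters,
# tok._tokenize("cat") returns ["c", "a", "t"] and tok.convert_tokens_to_ids maps each
# character through that vocabulary.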
| 214 | 0 |
import os
import sys
import unittest
UpperCamelCase__ : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
UpperCamelCase__ : Dict = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
UpperCamelCase__ : str = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = get_test_to_tester_mapping(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = get_test_to_tester_mapping(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''BertModelTest''': '''BertModelTester'''}
SCREAMING_SNAKE_CASE_ : List[str] = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = get_model_to_test_mapping(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = get_model_to_test_mapping(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
SCREAMING_SNAKE_CASE_ : Dict = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = get_model_to_tester_mapping(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = get_model_to_tester_mapping(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
SCREAMING_SNAKE_CASE_ : Tuple = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
| 105 | '''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum( arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
__a: List[str] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
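# Added note: this is Kadane's algorithm. For the sample input above the best subarray is
# [4, -1, 2, 1] with sum 6; with allow_empty_subarrays=True an all-negative input yields 0
# instead of its largest (negative) element.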
| 152 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def _lowerCAmelCase ( UpperCamelCase__: List[str] ) -> Optional[Any]:
"""simple docstring"""
monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""" , set() )
@pytest.fixture
def _lowerCAmelCase ( UpperCamelCase__: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , a__ ) -> int:
A = metric_id
class _UpperCamelCase :
"""simple docstring"""
lowerCAmelCase = [MetricMock(__snake_case ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def _UpperCAmelCase ( self ) -> List[str]:
return self._metrics
monkeypatch.setattr("""datasets.inspect.huggingface_hub""" , HfhMock() )
@pytest.mark.parametrize(
"""func, args""" , [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))] )
def _lowerCAmelCase ( UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: List[Any] ) -> List[str]:
"""simple docstring"""
if "tmp_path" in args:
A = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args )
with pytest.warns(UpperCamelCase__ , match="""https://huggingface.co/docs/evaluate""" ):
func(*UpperCamelCase__ )
| 546 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask( masked_input , model , tokenizer , topk=5 ):
    """simple docstring"""
    assert masked_input.count("<mask>" ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values, indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
        predicted_token = predicted_token_bpe.replace("\u2581" , " " )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
_lowercase : Optional[int] = CamembertTokenizer.from_pretrained("camembert-base")
_lowercase : int = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
_lowercase : Optional[int] = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
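# An equivalent call through the high-level pipeline API, added for illustration; it loads
# the same checkpoint and performs the tokenize/mask/top-k steps internally (the `top_k`
# argument name is assumed to match the installed transformers version).
from transformers import pipeline
camembert_fill_mask = pipeline("fill-mask", model="camembert-base", tokenizer="camembert-base")
print(camembert_fill_mask("Le camembert est <mask> :)", top_k=3))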
| 546 | 1 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
def get_masked_lm_array(UpperCamelCase__ : str ):
__UpperCAmelCase = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__UpperCAmelCase = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
__UpperCAmelCase = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_array(UpperCamelCase__ : str ):
__UpperCAmelCase = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__UpperCAmelCase = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
__UpperCAmelCase = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_layer_array(UpperCamelCase__ : int , UpperCamelCase__ : str ):
__UpperCAmelCase = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__UpperCAmelCase = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
__UpperCAmelCase = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_attention_layer_array(UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
__UpperCAmelCase = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__UpperCAmelCase = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = array.reshape(UpperCamelCase__ )
if "kernel" in name:
__UpperCAmelCase = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
print(f"""Loading model based on config from {config_path}...""" )
__UpperCAmelCase = BertConfig.from_json_file(UpperCamelCase__ )
__UpperCAmelCase = BertForMaskedLM(UpperCamelCase__ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__UpperCAmelCase = model.bert.encoder.layer[layer_index]
# Self-attention
__UpperCAmelCase = layer.attention.self
__UpperCAmelCase = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
__UpperCAmelCase = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
__UpperCAmelCase = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
__UpperCAmelCase = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
__UpperCAmelCase = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
__UpperCAmelCase = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
__UpperCAmelCase = layer.attention.output
__UpperCAmelCase = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
__UpperCAmelCase = get_encoder_attention_layer_array(
UpperCamelCase__ , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
__UpperCAmelCase = get_encoder_layer_array(UpperCamelCase__ , '''_attention_layer_norm/gamma''' )
__UpperCAmelCase = get_encoder_layer_array(UpperCamelCase__ , '''_attention_layer_norm/beta''' )
# Intermediate
__UpperCAmelCase = layer.intermediate
__UpperCAmelCase = get_encoder_layer_array(UpperCamelCase__ , '''_intermediate_dense/kernel''' )
__UpperCAmelCase = get_encoder_layer_array(UpperCamelCase__ , '''_intermediate_dense/bias''' )
# Output
__UpperCAmelCase = layer.output
__UpperCAmelCase = get_encoder_layer_array(UpperCamelCase__ , '''_output_dense/kernel''' )
__UpperCAmelCase = get_encoder_layer_array(UpperCamelCase__ , '''_output_dense/bias''' )
__UpperCAmelCase = get_encoder_layer_array(UpperCamelCase__ , '''_output_layer_norm/gamma''' )
__UpperCAmelCase = get_encoder_layer_array(UpperCamelCase__ , '''_output_layer_norm/beta''' )
# Embeddings
__UpperCAmelCase = get_encoder_array('''_position_embedding_layer/embeddings''' )
__UpperCAmelCase = get_encoder_array('''_type_embedding_layer/embeddings''' )
__UpperCAmelCase = get_encoder_array('''_embedding_norm_layer/gamma''' )
__UpperCAmelCase = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
__UpperCAmelCase = model.cls.predictions.transform
__UpperCAmelCase = get_masked_lm_array('''dense/kernel''' )
__UpperCAmelCase = get_masked_lm_array('''dense/bias''' )
__UpperCAmelCase = get_masked_lm_array('''layer_norm/gamma''' )
__UpperCAmelCase = get_masked_lm_array('''layer_norm/beta''' )
__UpperCAmelCase = get_masked_lm_array('''embedding_table''' )
# Pooling
__UpperCAmelCase = BertPooler(config=UpperCamelCase__ )
__UpperCAmelCase = get_encoder_array('''_pooler_layer/kernel''' )
__UpperCAmelCase = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(UpperCamelCase__ )
# Integration test - should load without any errors ;)
__UpperCAmelCase = BertForMaskedLM.from_pretrained(UpperCamelCase__ )
print(new_model.eval() )
print('''Model conversion was done successfully!''' )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
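# Illustrative invocation only -- the script name and all paths below are hypothetical
# placeholders for the arguments defined above:
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path /tmp/token_dropping_ckpt \
#       --bert_config_file /tmp/bert_config.json \
#       --pytorch_dump_path /tmp/pytorch_bert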
| 262 | '''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class A ( pl.LightningModule ):
def __init__( self : Dict , __a : List[str] ) -> Tuple:
super().__init__()
__UpperCAmelCase = model
__UpperCAmelCase = 2
__UpperCAmelCase = nn.Linear(self.model.config.hidden_size , self.num_labels )
def snake_case__ ( self : int ) -> int:
pass
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
# load longformer model from model identifier
__UpperCAmelCase = LongformerModel.from_pretrained(UpperCamelCase__ )
__UpperCAmelCase = LightningModel(UpperCamelCase__ )
__UpperCAmelCase = torch.load(UpperCamelCase__ , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
__UpperCAmelCase = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCamelCase__ )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
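# Illustrative invocation only -- the script name and paths are hypothetical placeholders;
# the model identifier value follows the help text above:
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path /tmp/longformer_qa.ckpt \
#       --pytorch_dump_folder_path /tmp/longformer_qa_pytorch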
| 262 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
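# Note: _LazyModule defers the actual submodule imports until a name listed in
# _import_structure is first accessed, so importing this package by itself does not
# immediately load the torch-backed modeling code.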
| 371 |
import sys
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Union[str, Any]:
UpperCamelCase_ = len(UpperCamelCase_ )
UpperCamelCase_ = [[0 for x in range(UpperCamelCase_ )] for x in range(UpperCamelCase_ )]
UpperCamelCase_ = [[0 for x in range(UpperCamelCase_ )] for x in range(UpperCamelCase_ )]
for chain_length in range(2 , UpperCamelCase_ ):
for a in range(1 , n - chain_length + 1 ):
UpperCamelCase_ = a + chain_length - 1
UpperCamelCase_ = sys.maxsize
for c in range(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCamelCase_ = cost
UpperCamelCase_ = c
return matrix, sol
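# The function above fills the classic matrix-chain DP table:
#   matrix[a][b] = min over a <= c < b of
#       matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# where array holds the chain dimensions (matrix i has shape array[i-1] x array[i]),
# and sol[a][b] records the split point c that achieves the minimum.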
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
if i == j:
print("A" + str(UpperCamelCase_ ) , end=" " )
else:
print("(" , end=" " )
print_optiomal_solution(UpperCamelCase_ , UpperCamelCase_ , optimal_solution[i][j] )
print_optiomal_solution(UpperCamelCase_ , optimal_solution[i][j] + 1 , UpperCamelCase_ )
print(")" , end=" " )
def lowerCAmelCase_ ( ) -> Any:
UpperCamelCase_ = [30, 35, 15, 5, 10, 20, 25]
UpperCamelCase_ = len(UpperCamelCase_ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCamelCase_ , UpperCamelCase_ = matrix_chain_order(UpperCamelCase_ )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(UpperCamelCase_ , 1 , n - 1 )
if __name__ == "__main__":
main()
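# For the sample dimensions [30, 35, 15, 5, 10, 20, 25] (the classic CLRS example),
# the expected result is 15125 scalar multiplications with the parenthesization
# ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).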
| 371 | 1 |
'''simple docstring'''
__magic_name__ = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
__magic_name__ = {value: key for key, value in encode_dict.items()}
def lowerCamelCase ( lowerCamelCase : str):
A_ : int = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""")
return encoded
def lowerCamelCase ( lowerCamelCase : str):
if set(SCREAMING_SNAKE_CASE__) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""")
A_ : Optional[int] = ""
for word in coded.split():
while len(SCREAMING_SNAKE_CASE__) != 0:
decoded += decode_dict[word[:5]]
A_ : Optional[int] = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
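# Worked example derived from the encode table above: "hello" maps letter by letter to
# AABBB AABAA ABABA ABABA ABBAB, i.e. the encoded string "AABBBAABAAABABAABABAABBAB";
# decode() reverses the mapping in 5-character chunks and recovers "hello".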
| 665 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = PegasusConfig
UpperCAmelCase_ = {}
UpperCAmelCase_ = "gelu"
def __init__( self : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=False, _UpperCAmelCase : Union[str, Any]=9_9, _UpperCAmelCase : Optional[Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=4, _UpperCAmelCase : str=3_7, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=4_0, _UpperCAmelCase : Any=2, _UpperCAmelCase : int=1, _UpperCAmelCase : str=0, ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : Dict = pad_token_id
SCREAMING_SNAKE_CASE__ : Tuple = bos_token_id
def A_ ( self : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_pegasus_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return config, inputs_dict
def A_ ( self : Union[str, Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFPegasusModel(config=_UpperCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : str = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["head_mask"]
SCREAMING_SNAKE_CASE__ : int = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : int = ids_tensor((self.batch_size, 3), config.vocab_size )
SCREAMING_SNAKE_CASE__ : str = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([input_ids, next_tokens], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 )
def _a ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=None , ) -> Any:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
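# Note: when decoder_attention_mask is not supplied, the helper above always marks the
# first decoder position as attended (the tf.ones slice over decoder_input_ids[:, :1])
# and masks the remaining positions wherever they equal the pad token; the three head
# masks simply default to all-ones tensors.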
@require_tf
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase_ = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = TFPegasusModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self, config_class=_UpperCAmelCase )
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_ ( self : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
UpperCAmelCase_ = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase_ = "google/pegasus-xsum"
@cached_property
def A_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def A_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def A_ ( self : str, **_UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.translate_src_text(**_UpperCAmelCase )
assert self.expected_text == generated_words
def A_ ( self : Any, **_UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, **_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : List[str] = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )
return generated_words
@slow
def A_ ( self : List[Any] ) -> Any:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 663 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def A ( lowercase , lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase = []
for part_id in partition_order:
UpperCamelCase = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(lowercase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
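# The helper above replays the dataframe partition by partition in the requested order
# and returns (row_id, row_dict) pairs where row_id has the form
# "<partition_id>_<row_index_within_partition>", matching what SparkExamplesIterable
# yields in the tests below.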
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(100 ).repartition(1 )
UpperCamelCase = Spark(lowercase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> Dict:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(10 ).repartition(2 )
UpperCamelCase = [1, 0]
UpperCamelCase = _generate_iterable_examples(lowercase , lowercase ) # Reverse the partitions.
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , lowercase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(10 ).repartition(1 )
UpperCamelCase = SparkExamplesIterable(lowercase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowercase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> str:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
UpperCamelCase = lambda lowercase : x.reverse()
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [2, 1, 0] )
UpperCamelCase = SparkExamplesIterable(lowercase ).shuffle_data_sources(lowercase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowercase ):
UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> str:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
UpperCamelCase = SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [0, 2] )
for i, (row_id, row_dict) in enumerate(lowercase ):
UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCamelCase = SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [1, 3] )
for i, (row_id, row_dict) in enumerate(lowercase ):
UpperCamelCase , UpperCamelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCamelCase = spark.range(100 ).repartition(1 )
UpperCamelCase = Spark(lowercase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 711 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_UpperCAmelCase : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_UpperCAmelCase : List[str] = {"facebook/blenderbot_small-90M": 512}
def A ( lowercase ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
UpperCamelCase = set(lowercase )
return pairs
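# Example: for the symbol tuple ("h", "e", "l", "l", "o</w>") the function above
# returns the consecutive-pair set {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}.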
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="__start__" , A_="__end__" , A_="__unk__" , A_="__null__" , **A_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ )
with open(A_ , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(A_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
UpperCamelCase = merges_handle.read().split('\n' )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in merges]
UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase = {}
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = re.sub('([.,!?()])' , r' \1' , A_ )
UpperCamelCase = re.sub('(\')' , r' \1 ' , A_ )
UpperCamelCase = re.sub(r'\s{2,}' , ' ' , A_ )
if "\n" in token:
UpperCamelCase = token.replace('\n' , ' __newln__' )
UpperCamelCase = token.split(' ' )
UpperCamelCase = []
for token in tokens:
if not len(A_ ):
continue
UpperCamelCase = token.lower()
UpperCamelCase = tuple(A_ )
UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
UpperCamelCase = get_pairs(A_ )
if not pairs:
words.append(A_ )
continue
while True:
UpperCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(A_ ):
try:
UpperCamelCase = word.index(A_ , A_ )
new_word.extend(word[i:j] )
UpperCamelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(A_ )
UpperCamelCase = new_word
if len(A_ ) == 1:
break
else:
UpperCamelCase = get_pairs(A_ )
UpperCamelCase = '@@ '.join(A_ )
UpperCamelCase = word[:-4]
UpperCamelCase = word
words.append(A_ )
return " ".join(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = re.findall(r'\S+\n?' , A_ )
for token in words:
split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) )
return split_tokens
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
UpperCamelCase = token.lower()
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = ' '.join(A_ ).replace('@@ ' , '' ).strip()
return out_string
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
UpperCamelCase = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
UpperCamelCase = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
| 3 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def snake_case_ ( ) -> str:
lowerCAmelCase_ = torch.nn.Linear(2 , 4)
lowerCAmelCase_ = torch.optim.AdamW(model.parameters() , lr=1.0)
lowerCAmelCase_ = torch.optim.lr_scheduler.OneCycleLR(__snake_case , max_lr=0.0_1 , steps_per_epoch=2 , epochs=1)
lowerCAmelCase_ = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
lowerCAmelCase_ = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
return model, optimizer, scheduler, train_dl, valid_dl
def snake_case_ ( __snake_case : Any) -> List[Any]:
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def snake_case_ ( __snake_case : Optional[int]) -> Any:
lowerCAmelCase_ = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
model.load_state_dict(__snake_case)
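# get_signature() above reduces a model to a single scalar fingerprint of its weights,
# and load_random_weights() overwrites the model with a freshly initialised Linear layer
# of matching shape; together they let the save/load tests below detect whether a
# checkpoint was actually restored.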
class __UpperCAmelCase ( __a ):
@require_cuda
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(_lowerCamelCase ):
lowerCAmelCase_ = Accelerator(cpu=_lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ = GradientState()
assert state.num_steps == 1
lowerCAmelCase_ = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowerCAmelCase_ = False
assert state.sync_gradients is False
GradientState._reset_state()
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
(
(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,(
lowerCAmelCase_
) ,
) = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def UpperCAmelCase_ ( self ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*_lowerCamelCase , **_lowerCamelCase ):
pass
with patch('''torch.cuda.set_device''' , _lowerCamelCase ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
lowerCAmelCase_ = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = get_signature(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_lowerCamelCase )
# make sure random weights don't match
load_random_weights(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) < 1E-3 )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = get_signature(_lowerCamelCase )
# saving hook
def save_config(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(_lowerCamelCase , '''data.json''' ) , '''w''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
# loading hook
def load_config(_lowerCamelCase , _lowerCamelCase ):
with open(os.path.join(_lowerCamelCase , '''data.json''' ) , '''r''' ) as f:
lowerCAmelCase_ = json.load(_lowerCamelCase )
lowerCAmelCase_ = config['''class_name''']
lowerCAmelCase_ = accelerator.register_save_state_pre_hook(_lowerCamelCase )
lowerCAmelCase_ = accelerator.register_load_state_pre_hook(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_lowerCamelCase )
# make sure random weights don't match with hooks
load_random_weights(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowerCAmelCase_ = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) < 1E-3 )
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_lowerCamelCase )
# make sure random weights don't match with hooks removed
load_random_weights(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowerCAmelCase_ = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) < 1E-3 )
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
lowerCAmelCase_ = None
# This should work
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertTrue(dummy_obj is None )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
lowerCAmelCase_ = [1, 2, 3]
# This should work
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def UpperCAmelCase_ ( self ):
from transformers import AutoModelForCausalLM
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=_lowerCamelCase , device_map={'''''': 0} , )
lowerCAmelCase_ = Accelerator()
# This should work
lowerCAmelCase_ = accelerator.prepare(_lowerCamelCase )
@slow
@require_bnb
def UpperCAmelCase_ ( self ):
from transformers import AutoModelForCausalLM
lowerCAmelCase_ = Accelerator()
with init_empty_weights():
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowerCAmelCase_ = infer_auto_device_map(_lowerCamelCase )
lowerCAmelCase_ = '''cpu'''
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=_lowerCamelCase , load_in_abit=_lowerCamelCase , llm_inta_enable_fpaa_cpu_offload=_lowerCamelCase )
# This should not work and get value error
with self.assertRaises(_lowerCamelCase ):
lowerCAmelCase_ = accelerator.prepare(_lowerCamelCase )
@slow
@require_bnb
@require_multi_gpu
def UpperCAmelCase_ ( self ):
from transformers import AutoModelForCausalLM
lowerCAmelCase_ = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowerCAmelCase_ = infer_auto_device_map(_lowerCamelCase )
lowerCAmelCase_ = 1
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=_lowerCamelCase , device_map=_lowerCamelCase , )
lowerCAmelCase_ = Accelerator()
# This should not work and get value error
with self.assertRaises(_lowerCamelCase ):
lowerCAmelCase_ = accelerator.prepare(_lowerCamelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def UpperCAmelCase_ ( self ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
lowerCAmelCase_ = infer_auto_device_map(_lowerCamelCase )
lowerCAmelCase_ = 1
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=_lowerCamelCase , device_map=_lowerCamelCase , )
lowerCAmelCase_ = Accelerator()
# This should work
lowerCAmelCase_ = accelerator.prepare(_lowerCamelCase )
@require_cuda
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = torch.nn.Linear(10 , 10 )
lowerCAmelCase_ = torch.optim.SGD(model.parameters() , lr=0.01 )
lowerCAmelCase_ = Accelerator(cpu=_lowerCamelCase )
lowerCAmelCase_ = accelerator.prepare(_lowerCamelCase )
| 274 | '''simple docstring'''
from __future__ import annotations
def snake_case_ ( __snake_case : list[int | str]) -> None:
create_state_space_tree(__snake_case , [] , 0 , [0 for i in range(len(__snake_case))])
def snake_case_ ( __snake_case : list[int | str] , __snake_case : list[int | str] , __snake_case : int , __snake_case : list[int] , ) -> None:
if index == len(__snake_case):
print(__snake_case)
return
for i in range(len(__snake_case)):
if not index_used[i]:
current_sequence.append(sequence[i])
lowerCAmelCase_ = True
create_state_space_tree(__snake_case , __snake_case , index + 1 , __snake_case)
current_sequence.pop()
lowerCAmelCase_ = False
A_ : list[int | str] =[3, 1, 2, 4]
generate_all_permutations(sequence)
A_ : list[int | str] =["A", "B", "C"]
generate_all_permutations(sequence_a)
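# For the first sequence above, the permutations are printed in the order the indices
# are explored, beginning with:
#   [3, 1, 2, 4]
#   [3, 1, 4, 2]
#   [3, 2, 1, 4]
# and continuing for 24 lines in total (4!).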
| 274 | 1 |
'''simple docstring'''
def __a ( A__ = 100_0000 ) -> int:
lowerCAmelCase = limit + 1
lowerCAmelCase = [0] * limit
for first_term in range(1 , A__ ):
for n in range(A__ , A__ , A__ ):
lowerCAmelCase = first_term + n / first_term
if common_difference % 4: # a + n/a (= 4*d) must be divisible by 4 for d to be an integer
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # z > 0 requires a > d, and n > 0 requires a < 4*d
lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"{solution() = }")
| 159 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Dict = logging.get_logger(__name__)
lowercase : List[str] = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
lowerCAmelCase = 'transfo-xl'
lowerCAmelCase = ['mems']
lowerCAmelCase = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=2_6_7_7_3_5 , SCREAMING_SNAKE_CASE : Dict=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0] , SCREAMING_SNAKE_CASE : Tuple=1_0_2_4 , SCREAMING_SNAKE_CASE : Tuple=1_0_2_4 , SCREAMING_SNAKE_CASE : Any=1_6 , SCREAMING_SNAKE_CASE : List[str]=6_4 , SCREAMING_SNAKE_CASE : int=4_0_9_6 , SCREAMING_SNAKE_CASE : Union[str, Any]=4 , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : int=1_8 , SCREAMING_SNAKE_CASE : Dict=1_6_0_0 , SCREAMING_SNAKE_CASE : Any=1_0_0_0 , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Optional[Any]=0 , SCREAMING_SNAKE_CASE : Optional[Any]=-1 , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Dict=0.1 , SCREAMING_SNAKE_CASE : Tuple=0.0 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : int="normal" , SCREAMING_SNAKE_CASE : Optional[int]=0.0_1 , SCREAMING_SNAKE_CASE : List[str]=0.0_1 , SCREAMING_SNAKE_CASE : List[str]=0.0_2 , SCREAMING_SNAKE_CASE : Union[str, Any]=1E-5 , SCREAMING_SNAKE_CASE : List[str]=0 , **SCREAMING_SNAKE_CASE : Tuple , ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = vocab_size
lowerCAmelCase = []
self.cutoffs.extend(SCREAMING_SNAKE_CASE )
if proj_share_all_but_first:
lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase = [False] + [False] * len(self.cutoffs )
lowerCAmelCase = d_model
lowerCAmelCase = d_embed
lowerCAmelCase = d_head
lowerCAmelCase = d_inner
lowerCAmelCase = div_val
lowerCAmelCase = pre_lnorm
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = mem_len
lowerCAmelCase = same_length
lowerCAmelCase = attn_type
lowerCAmelCase = clamp_len
lowerCAmelCase = sample_softmax
lowerCAmelCase = adaptive
lowerCAmelCase = dropout
lowerCAmelCase = dropatt
lowerCAmelCase = untie_r
lowerCAmelCase = init
lowerCAmelCase = init_range
lowerCAmelCase = proj_init_std
lowerCAmelCase = init_std
lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def __A ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def __A ( self : Any , SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 159 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase__ ( __UpperCamelCase )-> Dict:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
UpperCamelCase = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
UpperCamelCase = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" )
UpperCamelCase = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" )
UpperCamelCase = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" )
UpperCamelCase = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" )
UpperCamelCase = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" )
UpperCamelCase = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" )
UpperCamelCase = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" )
UpperCamelCase = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" )
UpperCamelCase = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" )
UpperCamelCase = key.replace("""image_encoder.module""" , """flava.image_model""" )
UpperCamelCase = key.replace("""text_encoder.module""" , """flava.text_model""" )
UpperCamelCase = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" )
UpperCamelCase = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" )
UpperCamelCase = key.replace("""text_projection""" , """flava.text_projection""" )
UpperCamelCase = key.replace("""image_projection""" , """flava.image_projection""" )
UpperCamelCase = value.float()
for key, value in codebook_state_dict.items():
UpperCamelCase = value
return upgrade
@torch.no_grad()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None )-> List[str]:
if config_path is not None:
UpperCamelCase = FlavaConfig.from_pretrained(__UpperCamelCase )
else:
UpperCamelCase = FlavaConfig()
UpperCamelCase = FlavaForPreTraining(__UpperCamelCase ).eval()
UpperCamelCase = convert_dalle_checkpoint(__UpperCamelCase , __UpperCamelCase , save_checkpoint=__UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
UpperCamelCase = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
UpperCamelCase = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location="""cpu""" )
UpperCamelCase = upgrade_state_dict(__UpperCamelCase , __UpperCamelCase )
hf_model.load_state_dict(__UpperCamelCase )
UpperCamelCase = hf_model.state_dict()
UpperCamelCase = count_parameters(__UpperCamelCase )
UpperCamelCase = count_parameters(__UpperCamelCase ) + count_parameters(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
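# Illustrative invocation only -- the script name and paths are hypothetical
# placeholders for the arguments defined above (--config_path may be omitted to use
# the default FlavaConfig):
#   python convert_flava_checkpoint.py \
#       --checkpoint_path /tmp/flava_pretraining.pt \
#       --codebook_path /tmp/flava_codebook.pt \
#       --pytorch_dump_folder_path /tmp/flava_hf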
| 301 |
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
SCREAMING_SNAKE_CASE__ = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , )-> Optional[int]:
output_path.parent.mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , use_external_data_format=__UpperCamelCase , enable_onnx_checker=__UpperCamelCase , opset_version=__UpperCamelCase , )
else:
export(
__UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , opset_version=__UpperCamelCase , )
@torch.no_grad()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False )-> Optional[Any]:
UpperCamelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
UpperCamelCase = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
UpperCamelCase = """cpu"""
UpperCamelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase , torch_dtype=__UpperCamelCase ).to(__UpperCamelCase )
UpperCamelCase = Path(__UpperCamelCase )
# TEXT ENCODER
UpperCamelCase = pipeline.text_encoder.config.max_position_embeddings
UpperCamelCase = pipeline.text_encoder.config.hidden_size
UpperCamelCase = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__UpperCamelCase , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=__UpperCamelCase , )
del pipeline.text_encoder
# UNET
UpperCamelCase = pipeline.unet.config.in_channels
UpperCamelCase = pipeline.unet.config.sample_size
UpperCamelCase = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
torch.randn(2 ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
torch.randn(2 , __UpperCamelCase , __UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
False,
) , output_path=__UpperCamelCase , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=__UpperCamelCase , use_external_data_format=__UpperCamelCase , )
UpperCamelCase = str(unet_path.absolute().as_posix() )
UpperCamelCase = os.path.dirname(__UpperCamelCase )
UpperCamelCase = onnx.load(__UpperCamelCase )
# clean up existing tensor files
shutil.rmtree(__UpperCamelCase )
os.mkdir(__UpperCamelCase )
# collate external tensor files into one
onnx.save_model(
__UpperCamelCase , __UpperCamelCase , save_as_external_data=__UpperCamelCase , all_tensors_to_one_file=__UpperCamelCase , location="""weights.pb""" , convert_attribute=__UpperCamelCase , )
del pipeline.unet
# VAE ENCODER
UpperCamelCase = pipeline.vae
UpperCamelCase = vae_encoder.config.in_channels
UpperCamelCase = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
UpperCamelCase = lambda __UpperCamelCase , __UpperCamelCase : vae_encoder.encode(__UpperCamelCase , __UpperCamelCase )[0].sample()
onnx_export(
__UpperCamelCase , model_args=(
torch.randn(1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=__UpperCamelCase , )
# VAE DECODER
UpperCamelCase = pipeline.vae
UpperCamelCase = vae_decoder.config.latent_channels
UpperCamelCase = vae_decoder.config.out_channels
# forward only through the decoder part
UpperCamelCase = vae_encoder.decode
onnx_export(
__UpperCamelCase , model_args=(
torch.randn(1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=__UpperCamelCase , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
UpperCamelCase = pipeline.safety_checker
UpperCamelCase = safety_checker.config.vision_config.num_channels
UpperCamelCase = safety_checker.config.vision_config.image_size
UpperCamelCase = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
torch.randn(1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=__UpperCamelCase , )
del pipeline.safety_checker
UpperCamelCase = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
UpperCamelCase = pipeline.feature_extractor
else:
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(__UpperCamelCase )
print("""ONNX pipeline saved to""" , __UpperCamelCase )
del pipeline
del onnx_pipeline
UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(__UpperCamelCase , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
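    # Example invocation (hypothetical script name and model path; flag names mirror the parser above):
    #   python convert_stable_diffusion_checkpoint_to_onnx.py \
    #       --model_path runwayml/stable-diffusion-v1-5 \
    #       --output_path ./stable_diffusion_onnx --opset 14 --fp16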
| 301 | 1 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 710 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: Optional[int] = ['''input_values''', '''padding_mask''']
    def __init__(self, feature_size=1, sampling_rate=24_000, padding_value=0.0, chunk_length_s=None, overlap=None, **kwargs):
        """simple docstring"""
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , __snake_case , __snake_case = None , __snake_case = False , __snake_case = None , __snake_case = None , __snake_case = None , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
UpperCAmelCase: Optional[Any] = True
UpperCAmelCase: List[str] = bool(
isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
UpperCAmelCase: int = [np.asarray(__snake_case , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__snake_case , np.ndarray ):
UpperCAmelCase: Dict = np.asarray(__snake_case , dtype=np.floataa )
elif isinstance(__snake_case , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
UpperCAmelCase: Optional[Any] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase: Any = [np.asarray(__snake_case ).T]
# verify inputs are valid
for idx, example in enumerate(__snake_case ):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
UpperCAmelCase: Tuple = None
UpperCAmelCase: str = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
UpperCAmelCase: Any = min(array.shape[0] for array in raw_audio )
UpperCAmelCase: List[Any] = int(np.floor(max_length / self.chunk_stride ) )
UpperCAmelCase: List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
UpperCAmelCase: Optional[Any] = max(array.shape[0] for array in raw_audio )
UpperCAmelCase: Union[str, Any] = int(np.ceil(max_length / self.chunk_stride ) )
UpperCAmelCase: List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
UpperCAmelCase: int = "max_length"
else:
UpperCAmelCase: Optional[Any] = input_values
# normal padding on batch
if padded_inputs is None:
UpperCAmelCase: Union[str, Any] = self.pad(
__snake_case , max_length=__snake_case , truncation=__snake_case , padding=__snake_case , return_attention_mask=__snake_case , )
if padding:
UpperCAmelCase: str = padded_inputs.pop("attention_mask" )
UpperCAmelCase: Tuple = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
UpperCAmelCase: Optional[int] = example[..., None]
input_values.append(example.T )
UpperCAmelCase: str = input_values
if return_tensors is not None:
UpperCAmelCase: Optional[int] = padded_inputs.convert_to_tensors(__snake_case )
return padded_inputs
| 166 | 0 |
def harmonic_series(n_term: str) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 201 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class UpperCamelCase__ ( enum.Enum ):
_SCREAMING_SNAKE_CASE : List[str] = 0
_SCREAMING_SNAKE_CASE : Optional[int] = 1
@add_end_docstrings(__lowercase )
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : int = "generated"
def __init__(self : Dict , *snake_case_ : Optional[Any] , **snake_case_ : List[Any] ):
super().__init__(*snake_case_ , **snake_case_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def lowerCAmelCase (self : Any , snake_case_ : Optional[Any]=None , snake_case_ : Dict=None , snake_case_ : List[Any]=None , snake_case_ : Dict=None , snake_case_ : Any=None , snake_case_ : int=None , **snake_case_ : List[str] , ):
__a : Dict = {}
if truncation is not None:
__a : str = truncation
__a : Tuple = generate_kwargs
__a : Optional[int] = {}
if return_tensors is not None and return_type is None:
__a : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
__a : str = return_type
if clean_up_tokenization_spaces is not None:
__a : Union[str, Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
__a : Dict = self.tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
if len(snake_case_ ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
__a : Tuple = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase (self : List[Any] , snake_case_ : int , snake_case_ : int , snake_case_ : int ):
return True
def lowerCAmelCase (self : List[Any] , *snake_case_ : str , snake_case_ : Dict ):
__a : Tuple = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , snake_case_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
__a : List[str] = ([prefix + arg for arg in args[0]],)
__a : List[Any] = True
elif isinstance(args[0] , snake_case_ ):
__a : str = (prefix + args[0],)
__a : int = False
else:
raise ValueError(
f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`" )
__a : Any = self.tokenizer(*snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__(self : int , *snake_case_ : Optional[int] , **snake_case_ : List[str] ):
__a : str = super().__call__(*snake_case_ , **snake_case_ )
if (
isinstance(args[0] , snake_case_ )
and all(isinstance(snake_case_ , snake_case_ ) for el in args[0] )
and all(len(snake_case_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def lowerCAmelCase (self : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Optional[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **snake_case_ : int ):
__a : Optional[int] = self._parse_and_tokenize(snake_case_ , truncation=snake_case_ , **snake_case_ )
return inputs
def lowerCAmelCase (self : Any , snake_case_ : List[str] , **snake_case_ : Union[str, Any] ):
if self.framework == "pt":
__a , __a : List[str] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
__a , __a : List[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
__a : Optional[Any] = generate_kwargs.get('''min_length''' , self.model.config.min_length )
__a : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(snake_case_ , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
__a : str = self.model.generate(**snake_case_ , **snake_case_ )
__a : Union[str, Any] = output_ids.shape[0]
if self.framework == "pt":
__a : Optional[Any] = output_ids.reshape(snake_case_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
__a : int = tf.reshape(snake_case_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def lowerCAmelCase (self : Dict , snake_case_ : Dict , snake_case_ : List[str]=ReturnType.TEXT , snake_case_ : str=False ):
__a : Optional[int] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__a : Optional[Any] = {f"{self.return_name}_token_ids": output_ids}
elif return_type == ReturnType.TEXT:
__a : str = {
f"{self.return_name}_text": self.tokenizer.decode(
snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ , )
}
records.append(snake_case_ )
return records
@add_end_docstrings(__lowercase )
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = "summary"
def __call__(self : Optional[Any] , *snake_case_ : Optional[int] , **snake_case_ : List[str] ):
return super().__call__(*snake_case_ , **snake_case_ )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : int ):
if max_length < min_length:
logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}." )
if input_length < max_length:
logger.warning(
f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})" )
@add_end_docstrings(__lowercase )
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Dict = "translation"
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : int ):
if input_length > 0.9 * max_length:
logger.warning(
f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def lowerCAmelCase (self : Any , *snake_case_ : int , snake_case_ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , snake_case_ : Any=None , snake_case_ : Tuple=None ):
if getattr(self.tokenizer , '''_build_translation_inputs''' , snake_case_ ):
return self.tokenizer._build_translation_inputs(
*snake_case_ , return_tensors=self.framework , truncation=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ )
else:
return super()._parse_and_tokenize(*snake_case_ , truncation=snake_case_ )
def lowerCAmelCase (self : Optional[int] , snake_case_ : int=None , snake_case_ : str=None , **snake_case_ : Optional[Any] ):
__a , __a , __a : str = super()._sanitize_parameters(**snake_case_ )
if src_lang is not None:
__a : Optional[int] = src_lang
if tgt_lang is not None:
__a : Tuple = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
__a : int = kwargs.get('''task''' , self.task )
__a : Union[str, Any] = task.split('''_''' )
if task and len(snake_case_ ) == 4:
# translation, XX, to YY
__a : str = items[1]
__a : str = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__(self : Optional[int] , *snake_case_ : Optional[Any] , **snake_case_ : Any ):
return super().__call__(*snake_case_ , **snake_case_ )
| 521 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
_UpperCamelCase = list[list[float | int]]
def _lowerCAmelCase( UpperCAmelCase_ : Matrix , UpperCAmelCase_ : Matrix ) -> Matrix:
lowerCAmelCase__ = len(UpperCAmelCase_ )
lowerCAmelCase__ = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase_ )]
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for row in range(UpperCAmelCase_ ):
for col in range(UpperCAmelCase_ ):
lowerCAmelCase__ = matrix[row][col]
lowerCAmelCase__ = vector[row][0]
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
while row < size and col < size:
# pivoting
lowerCAmelCase__ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase_ , UpperCAmelCase_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
lowerCAmelCase__ ,lowerCAmelCase__ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , UpperCAmelCase_ ):
lowerCAmelCase__ = augmented[rowa][col] / augmented[row][col]
lowerCAmelCase__ = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , UpperCAmelCase_ ):
for row in range(UpperCAmelCase_ ):
lowerCAmelCase__ = augmented[row][col] / augmented[col][col]
for cola in range(UpperCAmelCase_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase_ )
]
def _lowerCAmelCase( UpperCAmelCase_ : list[int] ) -> Callable[[int], int]:
lowerCAmelCase__ = len(UpperCAmelCase_ )
lowerCAmelCase__ = [[0 for _ in range(UpperCAmelCase_ )] for _ in range(UpperCAmelCase_ )]
lowerCAmelCase__ = [[0] for _ in range(UpperCAmelCase_ )]
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for x_val, y_val in enumerate(UpperCAmelCase_ ):
for col in range(UpperCAmelCase_ ):
lowerCAmelCase__ = (x_val + 1) ** (size - col - 1)
lowerCAmelCase__ = y_val
lowerCAmelCase__ = solve(UpperCAmelCase_ , UpperCAmelCase_ )
def interpolated_func(UpperCAmelCase_ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCAmelCase_ ) )
return interpolated_func
def _lowerCAmelCase( UpperCAmelCase_ : int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def _lowerCAmelCase( UpperCAmelCase_ : Callable[[int], int] = question_function , UpperCAmelCase_ : int = 10 ) -> int:
lowerCAmelCase__ = [func(UpperCAmelCase_ ) for x_val in range(1 , order + 1 )]
lowerCAmelCase__ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
lowerCAmelCase__ = 0
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for poly in polynomials:
lowerCAmelCase__ = 1
while func(UpperCAmelCase_ ) == poly(UpperCAmelCase_ ):
x_val += 1
ret += poly(UpperCAmelCase_ )
return ret
if __name__ == "__main__":
print(f'{solution() = }')
| 211 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
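# Example: excel_title_to_column("AB") == 28, since "A" contributes 1 * 26 and "B" contributes 2.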
if __name__ == "__main__":
from doctest import testmod
testmod()
| 211 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class snake_case ( lowercase__, lowercase__, lowercase__, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : int = StableDiffusionLatentUpscalePipeline
SCREAMING_SNAKE_CASE_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
SCREAMING_SNAKE_CASE_ : Dict = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
SCREAMING_SNAKE_CASE_ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ : List[str] = frozenset([] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
@property
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: Dict = 1
__lowerCAmelCase: Optional[Any] = 4
__lowerCAmelCase: Optional[Any] = (1_6, 1_6)
__lowerCAmelCase: Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase__)
return image
def lowercase_ ( self : Optional[int])-> Any:
'''simple docstring'''
torch.manual_seed(0)
__lowerCAmelCase: int = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCAmelCase__ , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=UpperCAmelCase__ , only_cross_attention=UpperCAmelCase__ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
__lowerCAmelCase: int = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
__lowerCAmelCase: Tuple = EulerDiscreteScheduler(prediction_type="sample")
__lowerCAmelCase: Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="quick_gelu" , projection_dim=5_1_2 , )
__lowerCAmelCase: Any = CLIPTextModel(UpperCAmelCase__)
__lowerCAmelCase: Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
__lowerCAmelCase: Tuple = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def lowercase_ ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=0)-> Optional[Any]:
'''simple docstring'''
if str(UpperCAmelCase__).startswith("mps"):
__lowerCAmelCase: int = torch.manual_seed(UpperCAmelCase__)
else:
__lowerCAmelCase: Any = torch.Generator(device=UpperCAmelCase__).manual_seed(UpperCAmelCase__)
__lowerCAmelCase: Optional[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : str)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = '''cpu'''
__lowerCAmelCase: Union[str, Any] = self.get_dummy_components()
__lowerCAmelCase: Tuple = self.pipeline_class(**UpperCAmelCase__)
pipe.to(UpperCAmelCase__)
pipe.set_progress_bar_config(disable=UpperCAmelCase__)
__lowerCAmelCase: List[str] = self.get_dummy_inputs(UpperCAmelCase__)
__lowerCAmelCase: int = pipe(**UpperCAmelCase__).images
__lowerCAmelCase: str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3))
__lowerCAmelCase: Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
__lowerCAmelCase: List[str] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCAmelCase__ , 1e-3)
def lowercase_ ( self : Optional[Any])-> Tuple:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)
def lowercase_ ( self : List[str])-> Optional[Any]:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)
def lowercase_ ( self : Optional[Any])-> Optional[Any]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7e-3)
def lowercase_ ( self : int)-> str:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)
def lowercase_ ( self : Union[str, Any])-> List[Any]:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3)
def lowercase_ ( self : str)-> List[Any]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3)
def lowercase_ ( self : Any)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
__lowerCAmelCase: Optional[int] = self.get_dummy_components()
__lowerCAmelCase: Any = self.pipeline_class(**UpperCAmelCase__)
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCAmelCase__)
pipe.to(UpperCAmelCase__)
pipe.set_progress_bar_config(disable=UpperCAmelCase__)
__lowerCAmelCase: Optional[int] = self.get_dummy_inputs(UpperCAmelCase__)
__lowerCAmelCase: Any = 2
__lowerCAmelCase: Any = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
__lowerCAmelCase: Optional[int] = getattr(UpperCAmelCase__ , scheduler_enum.name)
__lowerCAmelCase: Tuple = scheduler_cls.from_config(pipe.scheduler.config)
__lowerCAmelCase: Any = pipe(**UpperCAmelCase__)[0]
outputs.append(UpperCAmelCase__)
assert check_same_shape(UpperCAmelCase__)
@require_torch_gpu
@slow
class snake_case ( unittest.TestCase ):
def lowercase_ ( self : Tuple)-> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Dict)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = torch.manual_seed(3_3)
__lowerCAmelCase: Tuple = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa)
pipe.to("cuda")
__lowerCAmelCase: List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa)
upscaler.to("cuda")
__lowerCAmelCase: Tuple = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
__lowerCAmelCase: List[Any] = pipe(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type="latent").images
__lowerCAmelCase: str = upscaler(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , num_inference_steps=2_0 , guidance_scale=0 , generator=UpperCAmelCase__ , output_type="np" , ).images[0]
__lowerCAmelCase: Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy")
assert np.abs((expected_image - image).mean()) < 5e-2
def lowercase_ ( self : List[str])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: int = torch.manual_seed(3_3)
__lowerCAmelCase: Any = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa)
upscaler.to("cuda")
__lowerCAmelCase: Tuple = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
__lowerCAmelCase: int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png")
__lowerCAmelCase: str = upscaler(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , num_inference_steps=2_0 , guidance_scale=0 , generator=UpperCAmelCase__ , output_type="np" , ).images[0]
__lowerCAmelCase: Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy")
assert np.abs((expected_image - image).max()) < 5e-2
| 346 |
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """Count the nodes in the subtree rooted at ``start`` and record edges whose
    removal leaves an even-sized component."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict = {}
    cuts: list = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
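    # For this sample tree the program prints 2: cutting edges (1, 3) and (1, 6)
    # leaves components of sizes 4, 2 and 4, all even.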
| 92 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[str] =VOCAB_FILES_NAMES
a : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
a : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : str =["input_ids", "attention_mask"]
a : int =TaTokenizer
a : List[int] =[]
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="</s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__=100 , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
lowerCAmelCase : Any = [f"""<extra_id_{i}>""" for i in range(snake_case__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowerCAmelCase : List[str] = len(set(filter(lambda snake_case__ : bool("extra_id_" in str(snake_case__ ) ) , snake_case__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , extra_ids=snake_case__ , additional_special_tokens=snake_case__ , **snake_case__ , )
lowerCAmelCase : int = vocab_file
lowerCAmelCase : int = False if not self.vocab_file else True
lowerCAmelCase : Union[str, Any] = extra_ids
@staticmethod
def lowercase__ ( snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowerCAmelCase : List[Any] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , snake_case__ , )
return max_model_length
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase : Optional[int] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Tuple = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowerCAmelCase : Dict = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowercase__ ( self ):
"""simple docstring"""
return list(
set(filter(lambda snake_case__ : bool(re.search(r"<extra_id_\d+>" , snake_case__ ) ) is not None , self.additional_special_tokens ) ) )
def lowercase__ ( self ):
"""simple docstring"""
return [self.convert_tokens_to_ids(snake_case__ ) for token in self.get_sentinel_tokens()]
| 681 |
"""simple docstring"""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
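    # Sanity check: solution(10) == 27, since 10! = 3628800 and 3+6+2+8+8+0+0 = 27.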
| 681 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , ):
'''simple docstring'''
a_ : Tuple = parent
a_ : List[str] = 13
a_ : Dict = 7
a_ : Optional[int] = 30
a_ : str = self.seq_length + self.mem_len
a_ : List[str] = 15
a_ : Optional[int] = True
a_ : Any = True
a_ : str = 99
a_ : Any = [10, 50, 80]
a_ : Dict = 32
a_ : Union[str, Any] = 32
a_ : List[Any] = 4
a_ : Optional[int] = 8
a_ : Optional[Any] = 1_28
a_ : Tuple = 2
a_ : List[str] = 2
a_ : List[Any] = None
a_ : List[Any] = 1
a_ : List[Any] = 0
a_ : Optional[Any] = 3
a_ : Optional[Any] = self.vocab_size - 1
a_ : int = 0.01
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Any = None
if self.use_labels:
a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Optional[int] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _lowerCAmelCase ( self ):
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Optional[int] = TFTransfoXLModel(lowerCAmelCase_ )
a_ , a_ : List[str] = model(lowerCAmelCase_ ).to_tuple()
a_ : int = {"""input_ids""": input_ids_a, """mems""": mems_a}
a_ , a_ : List[str] = model(lowerCAmelCase_ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Tuple = TFTransfoXLLMHeadModel(lowerCAmelCase_ )
a_ , a_ : Dict = model(lowerCAmelCase_ ).to_tuple()
a_ : Any = {"""input_ids""": input_ids_a, """labels""": lm_labels}
a_ , a_ : Optional[Any] = model(lowerCAmelCase_ ).to_tuple()
a_ , a_ : str = model([input_ids_a, mems_a] ).to_tuple()
a_ : List[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
a_ , a_ : str = model(lowerCAmelCase_ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Tuple = TFTransfoXLForSequenceClassification(lowerCAmelCase_ )
a_ : int = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = self.prepare_config_and_inputs()
((a_) , (a_) , (a_) , (a_)) : Optional[Any] = config_and_inputs
a_ : str = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
"""simple docstring"""
a_ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
a_ = () if is_tf_available() else ()
a_ = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
a_ = False
a_ = False
a_ = False
a_ = False
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = TFTransfoXLModelTester(self )
a_ : List[str] = ConfigTester(self , config_class=lowerCAmelCase_ , d_embed=37 )
def _lowerCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
'''simple docstring'''
self.model_tester.set_seed()
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
self.model_tester.set_seed()
a_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Any = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
a_ : Any = model_class(lowerCAmelCase_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
a_ : List[str] = model.get_output_embeddings()
assert isinstance(lowerCAmelCase_ , tf.keras.layers.Layer )
a_ : List[Any] = model.get_bias()
assert name is None
else:
a_ : Optional[int] = model.get_output_embeddings()
assert x is None
a_ : List[Any] = model.get_bias()
assert name is None
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : List[str] = TFTransfoXLModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
a_ : Dict = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
a_ : Any = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
a_ : Optional[int] = model.generate(lowerCAmelCase_ , max_length=2_00 , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase_ )
| 577 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCAmelCase_ = 16 , lowerCAmelCase_ = 88 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = "geglu" , lowerCAmelCase_ = True , lowerCAmelCase_ = True , ):
'''simple docstring'''
super().__init__()
a_ : List[Any] = num_attention_heads
a_ : Optional[int] = attention_head_dim
a_ : Union[str, Any] = num_attention_heads * attention_head_dim
a_ : Any = in_channels
a_ : List[str] = torch.nn.GroupNorm(num_groups=lowerCAmelCase_ , num_channels=lowerCAmelCase_ , eps=1E-6 , affine=lowerCAmelCase_ )
a_ : List[Any] = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ )
# 3. Define transformers blocks
a_ : Any = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dropout=lowerCAmelCase_ , cross_attention_dim=lowerCAmelCase_ , activation_fn=lowerCAmelCase_ , attention_bias=lowerCAmelCase_ , double_self_attention=lowerCAmelCase_ , norm_elementwise_affine=lowerCAmelCase_ , )
for d in range(lowerCAmelCase_ )
] )
a_ : List[str] = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=1 , lowerCAmelCase_=None , lowerCAmelCase_ = True , ):
'''simple docstring'''
a_ , a_ , a_ , a_ : Optional[Any] = hidden_states.shape
a_ : List[Any] = batch_frames // num_frames
a_ : Tuple = hidden_states
a_ : str = hidden_states[None, :].reshape(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Union[str, Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
a_ : Optional[Any] = self.norm(lowerCAmelCase_ )
a_ : str = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Union[str, Any] = self.proj_in(lowerCAmelCase_ )
# 2. Blocks
for block in self.transformer_blocks:
a_ : List[str] = block(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , timestep=lowerCAmelCase_ , cross_attention_kwargs=lowerCAmelCase_ , class_labels=lowerCAmelCase_ , )
# 3. Output
a_ : Tuple = self.proj_out(lowerCAmelCase_ )
a_ : int = (
hidden_states[None, None, :]
.reshape(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
a_ : Tuple = hidden_states.reshape(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Optional[Any] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=lowerCAmelCase_ )
| 577 | 1 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    '''simple docstring'''
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f'{price_plus_tax(100, 0.25) = }')
    print(f'{price_plus_tax(125.50, 0.05) = }')
| 357 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between fnc and the x axis on [x_start, x_end] with the trapezoidal rule."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: int | float) -> int | float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
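    # Quick sanity check (the rule is exact for linear functions): trapezoidal_area(lambda x: x, 0, 1, 1) == 0.5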
| 420 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 420 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE_ : Any = k.replace(__a , __a )
if k.startswith('''encoder''' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = k.replace('''.attn''' , '''.self_attn''' )
SCREAMING_SNAKE_CASE_ : List[Any] = k.replace('''norm1''' , '''self_attn_layer_norm''' )
SCREAMING_SNAKE_CASE_ : Dict = k.replace('''norm2''' , '''final_layer_norm''' )
elif k.startswith('''decoder''' ):
SCREAMING_SNAKE_CASE_ : int = k.replace('''norm1''' , '''self_attn_layer_norm''' )
SCREAMING_SNAKE_CASE_ : Tuple = k.replace('''norm2''' , '''encoder_attn_layer_norm''' )
SCREAMING_SNAKE_CASE_ : List[str] = k.replace('''norm3''' , '''final_layer_norm''' )
return k
def rename_layernorm_keys(sd ) -> List[Any]:
    """simple docstring"""
    keys = [
        '''model.encoder.layernorm_embedding.weight''',
        '''model.encoder.layernorm_embedding.bias''',
        '''model.decoder.layernorm_embedding.weight''',
        '''model.decoder.layernorm_embedding.bias''',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('''layernorm_embedding''' , '''layer_norm''' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ) -> Optional[Any]:
    """simple docstring"""
    model = torch.load(checkpoint_path , map_location='''cpu''' )
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
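# Illustrative invocation of the converter above (the file names are placeholders, not files
# that ship with this script):
#   python convert_blenderbot_original_checkpoint.py \
#       --src_path ./blenderbot-model.bin --save_dir ./hf_blenderbot \
#       --hf_config_json ./blenderbot-3b-config.json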
| 710 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "data2vec-text"
def __init__( self : Any , lowercase_ : Any=30522 , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=12 , lowercase_ : Dict=12 , lowercase_ : List[Any]=3072 , lowercase_ : str="gelu" , lowercase_ : int=0.1 , lowercase_ : Dict=0.1 , lowercase_ : str=512 , lowercase_ : Optional[int]=2 , lowercase_ : int=0.02 , lowercase_ : int=1e-12 , lowercase_ : Any=1 , lowercase_ : Any=0 , lowercase_ : List[Any]=2 , lowercase_ : Tuple="absolute" , lowercase_ : Optional[int]=True , lowercase_ : int=None , **lowercase_ : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : str = position_embedding_type
SCREAMING_SNAKE_CASE_ : Optional[int] = use_cache
SCREAMING_SNAKE_CASE_ : str = classifier_dropout
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE_ : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
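# For any task other than "multiple-choice", the property above resolves to, e.g.:
#   OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])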
| 176 | 0 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowercase : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
super().__init__(*snake_case__ , **snake_case__ )
requires_backends(self , '''decord''' )
self.check_model_type(snake_case__ )
def _lowerCAmelCase ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None ) -> Any:
'''simple docstring'''
lowerCamelCase_ = {}
if frame_sampling_rate is not None:
lowerCamelCase_ = frame_sampling_rate
if num_frames is not None:
lowerCamelCase_ = num_frames
lowerCamelCase_ = {}
if top_k is not None:
lowerCamelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return super().__call__(snake_case__ , **snake_case__ )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=1 ) -> int:
'''simple docstring'''
if num_frames is None:
lowerCamelCase_ = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
lowerCamelCase_ = BytesIO(requests.get(snake_case__ ).content )
lowerCamelCase_ = VideoReader(snake_case__ )
videoreader.seek(0 )
lowerCamelCase_ = 0
lowerCamelCase_ = num_frames * frame_sampling_rate - 1
lowerCamelCase_ = np.linspace(snake_case__ , snake_case__ , num=snake_case__ , dtype=np.intaa )
lowerCamelCase_ = videoreader.get_batch(snake_case__ ).asnumpy()
lowerCamelCase_ = list(snake_case__ )
lowerCamelCase_ = self.image_processor(snake_case__ , return_tensors=self.framework )
return model_inputs
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.model(**snake_case__ )
return model_outputs
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=5 ) -> str:
'''simple docstring'''
if top_k > self.model.config.num_labels:
lowerCamelCase_ = self.model.config.num_labels
if self.framework == "pt":
lowerCamelCase_ = model_outputs.logits.softmax(-1 )[0]
lowerCamelCase_ , lowerCamelCase_ = probs.topk(snake_case__ )
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
lowerCamelCase_ = scores.tolist()
lowerCamelCase_ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(snake_case__ , snake_case__ )] | 142 | def lowerCamelCase ( UpperCamelCase : int ) -> bool:
_lowerCamelCase = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowerCamelCase ( UpperCamelCase : int = 50_00 ) -> int:
_lowerCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , UpperCamelCase )]
for i, pentagonal_i in enumerate(UpperCamelCase ):
for j in range(UpperCamelCase , len(UpperCamelCase ) ):
_lowerCamelCase = pentagonal_nums[j]
_lowerCamelCase = pentagonal_i + pentagonal_j
_lowerCamelCase = pentagonal_j - pentagonal_i
if is_pentagonal(UpperCamelCase ) and is_pentagonal(UpperCamelCase ):
return b
return -1
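# Hand-checked case of the identity used in is_pentagonal (values picked just for illustration):
#   P(4) = 4 * (3 * 4 - 1) // 2 = 22, and 1 + 24 * 22 = 529 = 23**2, so (1 + 23) / 6 = 4 exactly.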
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 544 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __a ( unittest.TestCase ):
def UpperCamelCase ( self : str)-> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self : Optional[Any])-> str:
__lowerCAmelCase =1
__lowerCAmelCase =3
__lowerCAmelCase =(32, 32)
__lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowerCamelCase_)
return image
@property
def UpperCamelCase ( self : Union[str, Any])-> Dict:
torch.manual_seed(0)
__lowerCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self : Tuple)-> Union[str, Any]:
torch.manual_seed(0)
__lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self : Any)-> Union[str, Any]:
torch.manual_seed(0)
__lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_)
@property
def UpperCamelCase ( self : Optional[Any])-> str:
def extract(*snake_case_ : Union[str, Any] , **snake_case_ : List[str]):
class __a :
def __init__( self : Any)-> Union[str, Any]:
__lowerCAmelCase =torch.ones([0])
def UpperCamelCase ( self : Any , snake_case_ : Tuple)-> int:
self.pixel_values.to(lowerCamelCase_)
return self
return Out()
return extract
def UpperCamelCase ( self : Union[str, Any])-> int:
__lowerCAmelCase ='''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase =self.dummy_cond_unet
__lowerCAmelCase =DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
__lowerCAmelCase =self.dummy_vae
__lowerCAmelCase =self.dummy_text_encoder
__lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
# make sure here that pndm scheduler skips prk
__lowerCAmelCase =StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
__lowerCAmelCase =sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
__lowerCAmelCase ='''A painting of a squirrel eating a burger'''
__lowerCAmelCase =torch.Generator(device=lowerCamelCase_).manual_seed(0)
__lowerCAmelCase =sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""")
__lowerCAmelCase =output.images
__lowerCAmelCase =torch.Generator(device=lowerCamelCase_).manual_seed(0)
__lowerCAmelCase =sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCamelCase_ , )[0]
__lowerCAmelCase =image[0, -3:, -3:, -1]
__lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase =np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def UpperCamelCase ( self : int)-> Any:
__lowerCAmelCase ='''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase =self.dummy_cond_unet
__lowerCAmelCase =PNDMScheduler(skip_prk_steps=lowerCamelCase_)
__lowerCAmelCase =self.dummy_vae
__lowerCAmelCase =self.dummy_text_encoder
__lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
# make sure here that pndm scheduler skips prk
__lowerCAmelCase =StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
__lowerCAmelCase =sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
__lowerCAmelCase ='''A painting of a squirrel eating a burger'''
__lowerCAmelCase =torch.Generator(device=lowerCamelCase_).manual_seed(0)
__lowerCAmelCase =sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""")
__lowerCAmelCase =output.images
__lowerCAmelCase =torch.Generator(device=lowerCamelCase_).manual_seed(0)
__lowerCAmelCase =sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCamelCase_ , )[0]
__lowerCAmelCase =image[0, -3:, -3:, -1]
__lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase =np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def UpperCamelCase ( self : str)-> List[Any]:
__lowerCAmelCase =StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowerCamelCase_)
assert isinstance(lowerCamelCase_ , lowerCamelCase_)
assert isinstance(pipe.scheduler , lowerCamelCase_)
assert pipe.safety_checker is None
__lowerCAmelCase =pipe("""example prompt""" , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_)
__lowerCAmelCase =StableDiffusionPipeline.from_pretrained(lowerCamelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowerCAmelCase =pipe("""example prompt""" , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""")
def UpperCamelCase ( self : Tuple)-> int:
__lowerCAmelCase =self.dummy_cond_unet
__lowerCAmelCase =PNDMScheduler(skip_prk_steps=lowerCamelCase_)
__lowerCAmelCase =self.dummy_vae
__lowerCAmelCase =self.dummy_text_encoder
__lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
# put models in fp16
__lowerCAmelCase =unet.half()
__lowerCAmelCase =vae.half()
__lowerCAmelCase =bert.half()
# make sure here that pndm scheduler skips prk
__lowerCAmelCase =StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
__lowerCAmelCase =sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
__lowerCAmelCase ='''A painting of a squirrel eating a burger'''
__lowerCAmelCase =sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""").images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __a ( unittest.TestCase ):
def UpperCamelCase ( self : int)-> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : List[str])-> str:
__lowerCAmelCase =StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowerCamelCase_)
__lowerCAmelCase =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
__lowerCAmelCase =sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
__lowerCAmelCase =(
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
__lowerCAmelCase =40_03_66_03_46
__lowerCAmelCase =7
# without safety guidance (sld_guidance_scale = 0)
__lowerCAmelCase =torch.manual_seed(lowerCamelCase_)
__lowerCAmelCase =sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
__lowerCAmelCase =output.images
__lowerCAmelCase =image[0, -3:, -3:, -1]
__lowerCAmelCase =[0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# without safety guidance (strong configuration)
__lowerCAmelCase =torch.manual_seed(lowerCamelCase_)
__lowerCAmelCase =sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCAmelCase =output.images
__lowerCAmelCase =image[0, -3:, -3:, -1]
__lowerCAmelCase =[0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCamelCase ( self : List[Any])-> Tuple:
__lowerCAmelCase =StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowerCamelCase_)
__lowerCAmelCase =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
__lowerCAmelCase =sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
__lowerCAmelCase ='''padme amidala taking a bath artwork, safe for work, no nudity'''
__lowerCAmelCase =27_34_97_17_55
__lowerCAmelCase =7
__lowerCAmelCase =torch.manual_seed(lowerCamelCase_)
__lowerCAmelCase =sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
__lowerCAmelCase =output.images
__lowerCAmelCase =image[0, -3:, -3:, -1]
__lowerCAmelCase =[0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
__lowerCAmelCase =torch.manual_seed(lowerCamelCase_)
__lowerCAmelCase =sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCAmelCase =output.images
__lowerCAmelCase =image[0, -3:, -3:, -1]
__lowerCAmelCase =[0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCamelCase ( self : Any)-> Any:
__lowerCAmelCase =StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""")
__lowerCAmelCase =sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
__lowerCAmelCase =(
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
__lowerCAmelCase =10_44_35_52_34
__lowerCAmelCase =12
__lowerCAmelCase =torch.manual_seed(lowerCamelCase_)
__lowerCAmelCase =sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
__lowerCAmelCase =output.images
__lowerCAmelCase =image[0, -3:, -3:, -1]
__lowerCAmelCase =np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
__lowerCAmelCase =torch.manual_seed(lowerCamelCase_)
__lowerCAmelCase =sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCAmelCase =output.images
__lowerCAmelCase =image[0, -3:, -3:, -1]
__lowerCAmelCase =np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1])
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 718 |
def odd_even_sort(input_list : list ) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
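# Quick non-interactive sanity check for the sort above (input values are arbitrary examples):
#   odd_even_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]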
| 456 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __magic_name__ :
'''simple docstring'''
__lowercase : int = BlenderbotConfig
__lowercase : Any = {}
__lowercase : Optional[Any] = 'gelu'
def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = eos_token_id
snake_case__ = pad_token_id
snake_case__ = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
snake_case__ = inputs_dict['''input_ids''']
snake_case__ = input_ids[:1, :]
snake_case__ = inputs_dict['''attention_mask'''][:1, :]
snake_case__ = inputs_dict['''head_mask''']
snake_case__ = 1
# first forward pass
snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
snake_case__ , snake_case__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case__ = model(_a , attention_mask=_a )[0]
snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
snake_case__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
if attention_mask is None:
snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__lowercase : Tuple = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowercase : Any = True
__lowercase : int = False
__lowercase : int = False
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFBlenderbotModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']
__lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
snake_case__ = self.model.generate(
model_inputs.input_ids , )
snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 33 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
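# Rough sketch of what the lazy-module indirection above enables (names mirror the
# _import_structure dict; nothing is imported eagerly at package load time):
#   from transformers.onnx import OnnxConfig   # resolved from .config on first attribute access
#   from transformers.onnx import export       # resolved from .convert on first attribute access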
| 265 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class a__ :
lowercase_ = 4_2
# setable values
lowercase_ = 4_2
lowercase_ = 4_2
lowercase_ = None
@classmethod
def a_ ( cls : Optional[int] , UpperCamelCase_ : CommonSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray):
"""simple docstring"""
return cls(common=__A , init_noise_sigma=__A , timesteps=__A)
@dataclass
class a__ ( UpperCamelCase_ ):
lowercase_ = 4_2
class a__ ( UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 4_2
@property
def a_ ( self : Dict):
"""simple docstring"""
return True
@register_to_config
def __init__( self : Dict , UpperCamelCase_ : int = 1000 , UpperCamelCase_ : float = 0.0001 , UpperCamelCase_ : float = 0.02 , UpperCamelCase_ : str = "linear" , UpperCamelCase_ : Optional[jnp.ndarray] = None , UpperCamelCase_ : str = "fixed_small" , UpperCamelCase_ : bool = True , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : jnp.dtype = jnp.floataa , ):
"""simple docstring"""
__UpperCAmelCase : Any = dtype
def a_ ( self : str , UpperCamelCase_ : Optional[CommonSchedulerState] = None):
"""simple docstring"""
if common is None:
__UpperCAmelCase : str = CommonSchedulerState.create(self)
# standard deviation of the initial noise distribution
__UpperCAmelCase : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype)
__UpperCAmelCase : Tuple = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1]
return DDPMSchedulerState.create(
common=__A , init_noise_sigma=__A , timesteps=__A , )
def a_ ( self : List[Any] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : Optional[int] = None):
"""simple docstring"""
return sample
def a_ ( self : List[str] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : int , UpperCamelCase_ : Tuple = ()):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__UpperCAmelCase : int = (jnp.arange(0 , __A) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__A , timesteps=__A , )
def a_ ( self : Dict , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = state.common.alphas_cumprod[t]
__UpperCAmelCase : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__UpperCAmelCase : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__UpperCAmelCase : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__UpperCAmelCase : Union[str, Any] = jnp.clip(__A , a_min=1e-20)
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__UpperCAmelCase : Optional[int] = jnp.log(jnp.clip(__A , a_min=1e-20))
elif variance_type == "fixed_large":
__UpperCAmelCase : Any = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__UpperCAmelCase : int = jnp.log(state.common.betas[t])
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__UpperCAmelCase : int = variance
__UpperCAmelCase : Dict = state.common.betas[t]
__UpperCAmelCase : int = (predicted_variance + 1) / 2
__UpperCAmelCase : str = frac * max_log + (1 - frac) * min_log
return variance
def a_ ( self : Optional[int] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : Optional[jax.random.KeyArray] = None , UpperCamelCase_ : bool = True , ):
"""simple docstring"""
__UpperCAmelCase : int = timestep
if key is None:
__UpperCAmelCase : str = jax.random.PRNGKey(0)
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__UpperCAmelCase : Tuple = jnp.split(__A , sample.shape[1] , axis=1)
else:
__UpperCAmelCase : Any = None
# 1. compute alphas, betas
__UpperCAmelCase : Optional[int] = state.common.alphas_cumprod[t]
__UpperCAmelCase : List[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
__UpperCAmelCase : Optional[int] = 1 - alpha_prod_t
__UpperCAmelCase : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__UpperCAmelCase : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__UpperCAmelCase : str = model_output
elif self.config.prediction_type == "v_prediction":
__UpperCAmelCase : Optional[int] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler.")
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__UpperCAmelCase : Optional[Any] = jnp.clip(__A , -1 , 1)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCAmelCase : Optional[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__UpperCAmelCase : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCAmelCase : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__UpperCAmelCase : Optional[int] = jax.random.split(__A , num=1)
__UpperCAmelCase : Union[str, Any] = jax.random.normal(__A , shape=model_output.shape , dtype=self.dtype)
return (self._get_variance(__A , __A , predicted_variance=__A) ** 0.5) * noise
__UpperCAmelCase : List[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype))
__UpperCAmelCase : List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__A , state=__A)
def a_ ( self : str , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , ):
"""simple docstring"""
return add_noise_common(state.common , __A , __A , __A)
def a_ ( self : List[Any] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , ):
"""simple docstring"""
return get_velocity_common(state.common , __A , __A , __A)
def __len__( self : Dict):
"""simple docstring"""
return self.config.num_train_timesteps
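# Minimal usage sketch for the scheduler above. The method and class names follow the public
# diffusers Flax API this (obfuscated) class corresponds to; treat them as assumptions, not as
# definitions taken from this file:
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       noise_pred = unet_apply(sample, t)            # user-supplied model call (hypothetical)
#       out = scheduler.step(state, noise_pred, t, sample, key=rng)
#       sample, state = out.prev_sample, out.state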
| 703 |
"""simple docstring"""
def _modexpt( base : int , exponent : int , modulo_value : int ) -> int:
    """simple docstring"""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution( base : int = 1777 , height : int = 1855 , digits : int = 8 ) -> int:
    """simple docstring"""
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
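# The helper above is plain square-and-multiply: for even e, base**e % m == ((base**(e // 2) % m) ** 2) % m,
# and for odd e one factor of base is peeled off first.
# Hand-checked example (numbers chosen only for illustration): _modexpt(3, 4, 5) == 81 % 5 == 1.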
if __name__ == "__main__":
print(f'''{solution() = }''')
| 487 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = False, False, False
@dataclass
class snake_case_ :
"""simple docstring"""
snake_case__ = None
snake_case__ = True
snake_case__ = True
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
snake_case__ = field(default="""Audio""" , init=__UpperCamelCase , repr=__UpperCamelCase )
def __call__(self: Optional[int] ) -> Tuple:
'''simple docstring'''
return self.pa_type
def UpperCAmelCase__ (self: List[Any] , __UpperCAmelCase: Union[str, bytes, dict] ) -> dict:
'''simple docstring'''
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__a : int = BytesIO()
sf.write(__UpperCAmelCase , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
__a : Optional[Any] = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
__a : Dict = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767
__a : str = BytesIO(bytes() )
sf.write(__UpperCAmelCase , __UpperCAmelCase , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def UpperCAmelCase__ (self: List[str] , __UpperCAmelCase: dict , __UpperCAmelCase: Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
'''simple docstring'''
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
__a , __a : List[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
__a : Union[str, Any] = xsplitext(__UpperCAmelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
__a : Union[str, Any] = token_per_repo_id or {}
__a : str = path.split("::" )[-1]
try:
__a : str = string_to_dict(__UpperCAmelCase , config.HUB_DATASETS_URL )["repo_id"]
__a : int = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__a : Union[str, Any] = None
with xopen(__UpperCAmelCase , "rb" , use_auth_token=__UpperCAmelCase ) as f:
__a , __a : List[Any] = sf.read(__UpperCAmelCase )
else:
__a , __a : Union[str, Any] = sf.read(__UpperCAmelCase )
__a : Tuple = array.T
if self.mono:
__a : List[Any] = librosa.to_mono(__UpperCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__a : Optional[int] = librosa.resample(__UpperCAmelCase , orig_sr=__UpperCAmelCase , target_sr=self.sampling_rate )
__a : str = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCAmelCase__ (self: Tuple ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def UpperCAmelCase__ (self: Optional[int] , __UpperCAmelCase: Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
__a : List[str] = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
__a : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__a : List[Any] = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
__a : Any = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
__a : int = pa.array([Audio().encode_example(__UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
__a : Tuple = storage.field("bytes" )
else:
__a : str = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
__a : Optional[Any] = storage.field("path" )
else:
__a : Optional[Any] = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
__a : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
def UpperCAmelCase__ (self: Tuple , __UpperCAmelCase: pa.StructArray ) -> pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(__UpperCAmelCase: List[Any] ):
with xopen(__UpperCAmelCase , "rb" ) as f:
__a : Any = f.read()
return bytes_
__a : str = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__a : Union[str, Any] = pa.array(
[os.path.basename(__UpperCAmelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
__a : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
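# Illustrative round trip for the feature above (values are made-up examples, not test fixtures):
#   feat = Audio(sampling_rate=16_000)
#   enc = feat.encode_example({"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000})
#   # enc is {"bytes": b"RIFF...", "path": None}; decode_example(enc) recovers the array + sampling_rate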
| 351 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case_ ( __UpperCamelCase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase__ (__UpperCAmelCase: ArgumentParser ) -> Tuple:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase__ (self: List[str] ) -> List[str]:
'''simple docstring'''
raise NotImplementedError()
| 351 | 1 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('covid_data', 'cases deaths recovered')
def covid_stats( url = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    '''simple docstring'''
    xpath_str = '''//div[@class = "maincounter-number"]/span/text()'''
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
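# Shape of the value covid_stats() returns (the numbers are invented for illustration only):
#   covid_data(cases='704,753,890', deaths='7,010,681', recovered='675,619,811')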
| 705 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ :Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
a_ :Union[str, Any] = 5
a_ :int = 10
@require_sentencepiece
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = SpeechaTextTokenizer
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : List[str] = True
def lowercase__ ( self : int ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Any = sp.SentencePieceProcessor()
spm_model.Load(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>''']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_lowercase ) )]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.tmpdirname )
save_json(_lowercase , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
SCREAMING_SNAKE_CASE__ : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : str = '''<pad>'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_lowercase ) , 10_01 )
def lowercase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [2_89, 50, 14, 1_74, 3_86] , )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(_lowercase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
SCREAMING_SNAKE_CASE__ : int = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def lowercase__ ( self : List[str] ):
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = {'''input_ids''': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
lowerCamelCase : Union[str, Any] = '''valhalla/s2t_mustc_multilinguial_medium'''
lowerCamelCase : List[Any] = '''C\'est trop cool'''
lowerCamelCase : Any = '''Esto es genial'''
@classmethod
def lowercase__ ( cls : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def lowercase__ ( self : str ):
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11 )
def lowercase__ ( self : Tuple ):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def lowercase__ ( self : Optional[int] ):
self.assertIn(_lowercase , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE__ : Tuple = [ES_CODE, 4, 16_01, 47, 76_47, 2]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertNotIn(self.tokenizer.eos_token , _lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''fr'''
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , _lowercase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def lowercase__ ( self : Tuple ):
        self.tokenizer.tgt_lang = '''fr'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
        self.tokenizer.tgt_lang = '''es'''
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
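    # Illustrative usage sketch of the multilingual checkpoint above (the exact language-code
    # ids come from the vocab; the pattern below is what the tests assert, not extra API):
    #   tokenizer = SpeechaTextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
    #   tokenizer.tgt_lang = "fr"                       # selects the French prefix token
    #   ids = tokenizer("C'est trop cool").input_ids    # [FR_CODE, ..., tokenizer.eos_token_id]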
| 250 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (EulerDiscreteScheduler,)
SCREAMING_SNAKE_CASE_ : Dict = 1_0
def __UpperCAmelCase ( self : Tuple ,**__A : Tuple ) -> List[str]:
_lowercase = {
'num_train_timesteps': 1100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**__A )
return config
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__A ,beta_end=__A )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__A )
def __UpperCAmelCase ( self : Dict ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def __UpperCAmelCase ( self : str ) -> int:
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowercase = torch.manual_seed(0 )
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowercase = sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowercase = scheduler.scale_model_input(__A ,__A )
_lowercase = model(__A ,__A )
_lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
_lowercase = output.prev_sample
_lowercase = torch.sum(torch.abs(__A ) )
_lowercase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
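    # The loop above is the standard diffusers sampling pattern:
    #   1. sample = scheduler.scale_model_input(sample, t)           # rescale to the model's input range
    #   2. model_output = model(sample, t)                           # predict the residual
    #   3. sample = scheduler.step(model_output, t, sample).prev_sample
    # The sum/mean asserts above pin the numerical result as a regression test.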
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config(prediction_type='v_prediction' )
_lowercase = scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowercase = torch.manual_seed(0 )
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowercase = sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowercase = scheduler.scale_model_input(__A ,__A )
_lowercase = model(__A ,__A )
_lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
_lowercase = output.prev_sample
_lowercase = torch.sum(torch.abs(__A ) )
_lowercase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps ,device=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowercase = sample.to(__A )
for t in scheduler.timesteps:
_lowercase = scheduler.scale_model_input(__A ,__A )
_lowercase = model(__A ,__A )
_lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
_lowercase = output.prev_sample
_lowercase = torch.sum(torch.abs(__A ) )
_lowercase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__A ,use_karras_sigmas=__A )
scheduler.set_timesteps(self.num_inference_steps ,device=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowercase = sample.to(__A )
for t in scheduler.timesteps:
_lowercase = scheduler.scale_model_input(__A ,__A )
_lowercase = model(__A ,__A )
_lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
_lowercase = output.prev_sample
_lowercase = torch.sum(torch.abs(__A ) )
_lowercase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 67 |
"""simple docstring"""
def _lowerCAmelCase ( ) -> int:
return [
a * b * (1_0_0_0 - a - b)
for a in range(1, 9_9_9 )
        for b in range(a, 9_9_9 )
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
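# For readability, an equivalent explicit-loop version of the same search
# (an illustrative sketch, not part of the original solution):
def solution_explicit() -> int:
    for a in range(1, 9_9_9):
        for b in range(a, 9_9_9):
            c = 1_0_0_0 - a - b
            if a * a + b * b == c * c:
                return a * b * c
    raise ValueError("no Pythagorean triplet sums to 1000")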
if __name__ == "__main__":
print(F'{solution() = }')
| 572 | 0 |
from string import ascii_lowercase, ascii_uppercase
def SCREAMING_SNAKE_CASE ( sentence: str ) -> str:
    """Capitalize the first letter of ``sentence``, leaving the rest unchanged."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
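# Example (illustrative): SCREAMING_SNAKE_CASE("hello world") -> "Hello world",
# while a string whose first character is not a lowercase letter is returned unchanged.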
if __name__ == "__main__":
from doctest import testmod
testmod()
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''falcon'''
UpperCAmelCase__ = ['''past_key_values''']
def __init__( self : Optional[Any] , UpperCAmelCase__ : str=65_024 , UpperCAmelCase__ : List[Any]=4_544 , UpperCAmelCase__ : Optional[Any]=32 , UpperCAmelCase__ : Dict=71 , UpperCAmelCase__ : Any=1e-5 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=11 , UpperCAmelCase__ : str=11 , **UpperCAmelCase__ : Dict , ) ->Any:
'''simple docstring'''
A__ = vocab_size
# Backward compatibility with n_embed kwarg
A__ = kwargs.pop('''n_embed''' , UpperCAmelCase__)
A__ = hidden_size if n_embed is None else n_embed
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = layer_norm_epsilon
A__ = initializer_range
A__ = use_cache
A__ = hidden_dropout
A__ = attention_dropout
A__ = bos_token_id
A__ = eos_token_id
A__ = num_attention_heads if num_kv_heads is None else num_kv_heads
A__ = alibi
A__ = new_decoder_architecture
A__ = multi_query # Ignored when new_decoder_architecture is True
A__ = parallel_attn
A__ = bias
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__)
@property
def SCREAMING_SNAKE_CASE ( self : str) ->int:
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
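    # Worked example with the defaults above: hidden_size=4544 and num_attention_heads=71
    # give a per-head dimension of 4544 // 71 = 64.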
@property
def SCREAMING_SNAKE_CASE ( self : str) ->Dict:
'''simple docstring'''
return not self.alibi
| 177 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__a: Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
__a: Dict = 256
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = ['''melgan''']
def __init__( self : List[str] , lowerCamelCase : SpectrogramNotesEncoder , lowerCamelCase : SpectrogramContEncoder , lowerCamelCase : TaFilmDecoder , lowerCamelCase : DDPMScheduler , lowerCamelCase : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
_UpperCAmelCase = math.log(1E-5 ) # Matches MelGAN training.
_UpperCAmelCase = 4.0 # Largest value for most examples
_UpperCAmelCase = 128
self.register_modules(
notes_encoder=lowerCamelCase , continuous_encoder=lowerCamelCase , decoder=lowerCamelCase , scheduler=lowerCamelCase , melgan=lowerCamelCase , )
def lowerCamelCase ( self : List[str] , lowerCamelCase : List[str] , lowerCamelCase : int=(-1.0, 1.0) , lowerCamelCase : List[str]=False ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = output_range
if clip:
_UpperCAmelCase = torch.clip(lowerCamelCase , self.min_value , self.max_value )
# Scale to [0, 1].
_UpperCAmelCase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Any=(-1.0, 1.0) , lowerCamelCase : Tuple=False ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = input_range
_UpperCAmelCase = torch.clip(lowerCamelCase , lowerCamelCase , lowerCamelCase ) if clip else outputs
# Scale to [0, 1].
_UpperCAmelCase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
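    # Note (illustrative): scale_features and scale_to_features are inverse affine maps between
    # the spectrogram range [min_value, max_value] = [log(1e-5), 4.0] and a model-facing range
    # such as [-1.0, 1.0]; applying one after the other recovers the original values
    # (up to the optional clipping).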
def lowerCamelCase ( self : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase = input_tokens > 0
_UpperCAmelCase , _UpperCAmelCase = self.notes_encoder(
encoder_input_tokens=lowerCamelCase , encoder_inputs_mask=lowerCamelCase )
_UpperCAmelCase , _UpperCAmelCase = self.continuous_encoder(
encoder_inputs=lowerCamelCase , encoder_inputs_mask=lowerCamelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def lowerCamelCase ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : Tuple ) -> str:
"""simple docstring"""
_UpperCAmelCase = noise_time
if not torch.is_tensor(lowerCamelCase ):
_UpperCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
_UpperCAmelCase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCAmelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_UpperCAmelCase = self.decoder(
encodings_and_masks=lowerCamelCase , decoder_input_tokens=lowerCamelCase , decoder_noise_time=lowerCamelCase )
return logits
@torch.no_grad()
def __call__( self : Optional[int] , lowerCamelCase : List[List[int]] , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : int = 100 , lowerCamelCase : bool = True , lowerCamelCase : str = "numpy" , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase , lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(lowerCamelCase )}.""" )
_UpperCAmelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_UpperCAmelCase = np.zeros([1, 0, self.n_dims] , np.floataa )
_UpperCAmelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowerCamelCase , device=self.device )
for i, encoder_input_tokens in enumerate(lowerCamelCase ):
if i == 0:
_UpperCAmelCase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_UpperCAmelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowerCamelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_UpperCAmelCase = ones
_UpperCAmelCase = self.scale_features(
lowerCamelCase , output_range=[-1.0, 1.0] , clip=lowerCamelCase )
_UpperCAmelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowerCamelCase , continuous_mask=lowerCamelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_UpperCAmelCase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=lowerCamelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(lowerCamelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_UpperCAmelCase = self.decode(
encodings_and_masks=lowerCamelCase , input_tokens=lowerCamelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ).prev_sample
_UpperCAmelCase = self.scale_to_features(lowerCamelCase , input_range=[-1.0, 1.0] )
_UpperCAmelCase = mel[:1]
_UpperCAmelCase = mel.cpu().float().numpy()
_UpperCAmelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase , lowerCamelCase )
logger.info("""Generated segment""" , lowerCamelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
_UpperCAmelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_UpperCAmelCase = full_pred_mel
if not return_dict:
return (output,)
        return AudioPipelineOutput(audios=lowerCamelCase )
| 108 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text ):
    """Print the first- and second-order Shannon entropy estimates of ``text``."""
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f'{round(-1 * my_fir_sum ):.1f}' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f'{round(-1 * my_sec_sum ):.1f}' )
    # print the difference between them
    print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
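# The quantity printed above is an estimate of the Shannon entropy
#     H = -sum(p(x) * log2(p(x)))
# computed first over single characters and then over character pairs; the final line
# prints the difference between the two estimates.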
def analyze_text( text ):
    """Count the single characters and two-character sequences occurring in ``text``."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 ,len(text ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def main( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
    main()
| 34 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __lowerCAmelCase ( A ):
UpperCamelCase = 42
UpperCamelCase = 42
class __lowerCAmelCase ( A , A ):
UpperCamelCase = 1
@register_to_config
def __init__( self : List[str] , A : int = 20_00 , A : float = 0.1_5 , A : float = 0.0_1 , A : float = 1_3_4_8.0 , A : float = 1E-5 , A : int = 1 , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = sigma_max
# setable values
_UpperCAmelCase = None
self.set_sigmas(A , A , A , A)
def _lowerCamelCase ( self : int , A : torch.FloatTensor , A : Optional[int] = None) -> torch.FloatTensor:
"""simple docstring"""
return sample
def _lowerCamelCase ( self : Union[str, Any] , A : int , A : float = None , A : Union[str, torch.device] = None) -> Any:
"""simple docstring"""
_UpperCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_UpperCAmelCase = torch.linspace(1 , A , A , device=A)
def _lowerCamelCase ( self : List[Any] , A : int , A : float = None , A : float = None , A : float = None) -> str:
"""simple docstring"""
_UpperCAmelCase = sigma_min if sigma_min is not None else self.config.sigma_min
_UpperCAmelCase = sigma_max if sigma_max is not None else self.config.sigma_max
_UpperCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(A , A)
_UpperCAmelCase = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_UpperCAmelCase = torch.exp(torch.linspace(math.log(A) , math.log(A) , A))
_UpperCAmelCase = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def _lowerCamelCase ( self : Dict , A : Optional[Any] , A : Union[str, Any]) -> str:
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device)) , self.discrete_sigmas[timesteps - 1].to(timesteps.device) , )
def _lowerCamelCase ( self : int , A : torch.FloatTensor , A : int , A : torch.FloatTensor , A : Optional[torch.Generator] = None , A : bool = True , ) -> Union[SdeVeOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
_UpperCAmelCase = timestep * torch.ones(
sample.shape[0] , device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
_UpperCAmelCase = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_UpperCAmelCase = timesteps.to(self.discrete_sigmas.device)
_UpperCAmelCase = self.discrete_sigmas[timesteps].to(sample.device)
_UpperCAmelCase = self.get_adjacent_sigma(A , A).to(sample.device)
_UpperCAmelCase = torch.zeros_like(A)
_UpperCAmelCase = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_UpperCAmelCase = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
_UpperCAmelCase = diffusion.unsqueeze(-1)
_UpperCAmelCase = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_UpperCAmelCase = randn_tensor(
sample.shape , layout=sample.layout , generator=A , device=sample.device , dtype=sample.dtype)
_UpperCAmelCase = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_UpperCAmelCase = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=A , prev_sample_mean=A)
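    # In equation form (illustrative): with zero drift for the VE SDE, the update above is
    #     x_{t-1} = x_t + g_t^2 * score(x_t, t) + g_t * z,   z ~ N(0, I),
    # where g_t^2 = sigma_t^2 - sigma_{t-1}^2 and the model output plays the role of the score
    # grad_x log p_t(x).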
def _lowerCamelCase ( self : Dict , A : torch.FloatTensor , A : torch.FloatTensor , A : Optional[torch.Generator] = None , A : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_UpperCAmelCase = randn_tensor(sample.shape , layout=sample.layout , generator=A).to(sample.device)
# compute step size from the model_output, the noise, and the snr
_UpperCAmelCase = torch.norm(model_output.reshape(model_output.shape[0] , -1) , dim=-1).mean()
_UpperCAmelCase = torch.norm(noise.reshape(noise.shape[0] , -1) , dim=-1).mean()
_UpperCAmelCase = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_UpperCAmelCase = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_UpperCAmelCase = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
_UpperCAmelCase = step_size.unsqueeze(-1)
_UpperCAmelCase = sample + step_size * model_output
_UpperCAmelCase = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A)
def _lowerCamelCase ( self : List[str] , A : torch.FloatTensor , A : torch.FloatTensor , A : torch.FloatTensor , ) -> torch.FloatTensor:
"""simple docstring"""
_UpperCAmelCase = timesteps.to(original_samples.device)
_UpperCAmelCase = self.discrete_sigmas.to(original_samples.device)[timesteps]
_UpperCAmelCase = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(A) * sigmas[:, None, None, None]
)
_UpperCAmelCase = noise + original_samples
return noisy_samples
def __len__( self : List[Any]) -> str:
"""simple docstring"""
return self.config.num_train_timesteps
| 639 |
def A ( a : int , b : int ) -> str:
    '''Return the bitwise XOR of two non-negative integers as a binary string.'''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
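# Example (illustrative): A(25, 32) == "0b111001", since 25 = 0b011001, 32 = 0b100000
# and their bitwise XOR is 0b111001 (= 57).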
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , lowercase__ : Any , lowercase__ : Tuple=7 , lowercase__ : List[Any]=3 , lowercase__ : Any=3_0 , lowercase__ : List[Any]=4_0_0 , lowercase__ : Dict=True , lowercase__ : Dict=None , lowercase__ : Any=True , lowercase__ : List[Any]=[0.5, 0.5, 0.5] , lowercase__ : Tuple=[0.5, 0.5, 0.5] , lowercase__ : Optional[Any]=True , lowercase__ : Union[str, Any]=1 / 2_5_5 , lowercase__ : Any=True , ):
__lowercase : Optional[int] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
__lowercase : str = parent
__lowercase : Optional[int] = batch_size
__lowercase : int = num_channels
__lowercase : Any = min_resolution
__lowercase : Dict = max_resolution
__lowercase : Optional[int] = do_resize
__lowercase : str = size
__lowercase : Optional[int] = do_normalize
__lowercase : Optional[Any] = image_mean
__lowercase : Optional[Any] = image_std
__lowercase : Optional[Any] = do_rescale
__lowercase : Optional[Any] = rescale_factor
__lowercase : Any = do_pad
def snake_case ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case ( self : int , lowercase__ : Tuple , lowercase__ : Dict=False ):
if not batched:
__lowercase : List[str] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
__lowercase : Tuple = image.size
else:
__lowercase : Any = image.shape[1], image.shape[2]
if w < h:
__lowercase : Any = int(self.size["shortest_edge"] * h / w )
__lowercase : str = self.size['shortest_edge']
elif w > h:
__lowercase : Optional[Any] = self.size['shortest_edge']
__lowercase : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
__lowercase : int = self.size['shortest_edge']
__lowercase : Any = self.size['shortest_edge']
else:
__lowercase : Dict = []
for image in image_inputs:
__lowercase : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowercase : Optional[Any] = max(__UpperCamelCase , key=lambda lowercase__ : item[0] )[0]
__lowercase : Any = max(__UpperCamelCase , key=lambda lowercase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = YolosImageProcessor if is_vision_available() else None
def snake_case ( self : Dict ):
__lowercase : str = YolosImageProcessingTester(self )
@property
def snake_case ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : List[str] ):
__lowercase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "size" ) )
def snake_case ( self : List[str] ):
__lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
__lowercase : List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def snake_case ( self : List[str] ):
pass
def snake_case ( self : List[str] ):
__lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__lowercase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowercase : int = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__lowercase : Any = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self : Dict ):
__lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__lowercase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowercase : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase : int = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
__lowercase : int = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self : Tuple ):
__lowercase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__lowercase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowercase : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase : Any = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
__lowercase : int = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self : Any ):
__lowercase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
__lowercase : Union[str, Any] = self.image_processing_class(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase , do_rescale=__UpperCamelCase )
# create random PyTorch tensors
__lowercase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
__lowercase : List[Any] = image_processing_a.pad(__UpperCamelCase , return_tensors="pt" )
__lowercase : Tuple = image_processing_a(__UpperCamelCase , return_tensors="pt" )
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1e-4 ) )
@slow
def snake_case ( self : Tuple ):
__lowercase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__lowercase : Tuple = json.loads(f.read() )
__lowercase : Tuple = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
__lowercase : Optional[Any] = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
__lowercase : int = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="pt" )
# verify pixel values
__lowercase : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __UpperCamelCase )
__lowercase : Dict = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCamelCase , atol=1e-4 ) )
# verify area
__lowercase : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCamelCase ) )
# verify boxes
__lowercase : int = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCamelCase )
__lowercase : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCamelCase , atol=1e-3 ) )
# verify image_id
__lowercase : Tuple = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCamelCase ) )
# verify is_crowd
__lowercase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCamelCase ) )
# verify class_labels
__lowercase : Union[str, Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCamelCase ) )
# verify orig_size
__lowercase : Tuple = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCamelCase ) )
# verify size
__lowercase : Any = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCamelCase ) )
@slow
def snake_case ( self : Dict ):
__lowercase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__lowercase : Any = json.loads(f.read() )
__lowercase : List[str] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
__lowercase : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__lowercase : Dict = YolosImageProcessor(format="coco_panoptic" )
__lowercase : Optional[int] = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="pt" )
# verify pixel values
__lowercase : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __UpperCamelCase )
__lowercase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCamelCase , atol=1e-4 ) )
# verify area
__lowercase : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCamelCase ) )
# verify boxes
__lowercase : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCamelCase )
__lowercase : Tuple = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCamelCase , atol=1e-3 ) )
# verify image_id
__lowercase : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCamelCase ) )
# verify is_crowd
__lowercase : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCamelCase ) )
# verify class_labels
__lowercase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCamelCase ) )
# verify masks
__lowercase : Dict = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __UpperCamelCase )
# verify orig_size
__lowercase : Tuple = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCamelCase ) )
# verify size
__lowercase : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCamelCase ) )
| 575 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__SCREAMING_SNAKE_CASE =False
class UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : str = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase_ : int = torch.manual_seed(0 )
lowercase_ : Optional[int] = pipe.dual_guided(
prompt='first prompt' ,image=__UpperCamelCase ,text_to_image_strength=0.75 ,generator=__UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCamelCase )
lowercase_ : str = VersatileDiffusionPipeline.from_pretrained(__UpperCamelCase ,torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : List[Any] = generator.manual_seed(0 )
lowercase_ : Union[str, Any] = pipe.dual_guided(
prompt='first prompt' ,image=__UpperCamelCase ,text_to_image_strength=0.75 ,generator=__UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Any = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : int = 'cyberpunk 2077'
lowercase_ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase_ : Optional[Any] = torch.manual_seed(0 )
lowercase_ : int = pipe.dual_guided(
prompt=__UpperCamelCase ,image=__UpperCamelCase ,text_to_image_strength=0.75 ,generator=__UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images
lowercase_ : int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ : Union[str, Any] = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase_ : Optional[Any] = 'A painting of a squirrel eating a burger '
lowercase_ : Optional[Any] = torch.manual_seed(0 )
lowercase_ : Dict = pipe.text_to_image(
prompt=__UpperCamelCase ,generator=__UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ).images
lowercase_ : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ : Tuple = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase_ : Dict = pipe.image_variation(__UpperCamelCase ,generator=__UpperCamelCase ,output_type='numpy' ).images
lowercase_ : Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 425 | 0 |
def __magic_name__ ( number: int ) -> bool:
    '''Check whether ``number`` is an automorphic number, i.e. its square ends in the number itself.'''
    if not isinstance(number , int ):
        UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(UpperCamelCase )
    if number < 0:
        return False
    number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
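# Example (illustrative): 76 is automorphic because 76**2 = 5776 ends in 76,
# whereas 7 is not because 7**2 = 49 does not end in 7.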
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
def __magic_name__ ( input_str: str ) -> str:
    '''Return the words of ``input_str`` in reverse order.'''
return " ".join(input_str.split()[::-1] )
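# Example (illustrative): __magic_name__("hello world") -> "world hello"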
if __name__ == "__main__":
import doctest
doctest.testmod()
| 414 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_lowercase ):
"""simple docstring"""
super().__init__(**_lowercase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , _lowercase , **_lowercase ):
"""simple docstring"""
return super().__call__(_lowercase , **_lowercase )
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {}
if "candidate_labels" in kwargs:
_lowerCAmelCase = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
_lowerCAmelCase = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _lowercase ( self , _lowercase , _lowercase=None , _lowercase="This is a photo of {}." ):
"""simple docstring"""
_lowerCAmelCase = load_image(_lowercase )
_lowerCAmelCase = self.image_processor(images=[image] , return_tensors=self.framework )
_lowerCAmelCase = candidate_labels
_lowerCAmelCase = [hypothesis_template.format(_lowercase ) for x in candidate_labels]
_lowerCAmelCase = self.tokenizer(_lowercase , return_tensors=self.framework , padding=_lowercase )
_lowerCAmelCase = [text_inputs]
return inputs
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = model_inputs.pop("""candidate_labels""" )
_lowerCAmelCase = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , _lowercase ):
_lowerCAmelCase = text_inputs[0]
else:
# Batching case.
_lowerCAmelCase = text_inputs[0][0]
_lowerCAmelCase = self.model(**_lowercase , **_lowercase )
_lowerCAmelCase = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = model_outputs.pop("""candidate_labels""" )
_lowerCAmelCase = model_outputs["""logits"""][0]
if self.framework == "pt":
_lowerCAmelCase = logits.softmax(dim=-1 ).squeeze(-1 )
_lowerCAmelCase = probs.tolist()
if not isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = [scores]
elif self.framework == "tf":
_lowerCAmelCase = stable_softmax(_lowercase , axis=-1 )
_lowerCAmelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
_lowerCAmelCase = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(_lowercase , _lowercase ) , key=lambda _lowercase : -x[0] )
]
return result
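# Illustrative usage sketch (the checkpoint name is an assumption; any CLIP-style
# zero-shot image-classification model works):
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")
#   # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]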
| 5 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowercase__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
_UpperCamelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
_UpperCamelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCamelCase = f"{src_lang}-{tgt_lang}"
_UpperCamelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase )
_UpperCamelCase = os.path.join(_UpperCamelCase , "README.md" )
print(f"Generating {path}" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
snake_case_ : List[Any] = Path(__file__).resolve().parent.parent.parent
snake_case_ : List[Any] = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case_ : str = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 138 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
snake_case =None
snake_case =BloomTokenizerFast
snake_case =BloomTokenizerFast
snake_case =True
snake_case =False
snake_case ="""tokenizer_file"""
snake_case ={"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def SCREAMING_SNAKE_CASE ( self ):
super().setUp()
_UpperCAmelCase =BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self , **_snake_case ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.get_rust_tokenizer()
_UpperCAmelCase =["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
_UpperCAmelCase =[[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
_UpperCAmelCase =tokenizer.batch_encode_plus(lowerCamelCase_ )["""input_ids"""]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_UpperCAmelCase =tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( self , _snake_case=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_UpperCAmelCase =self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_UpperCAmelCase ="""This is a simple input"""
_UpperCAmelCase =["""This is a simple input 1""", """This is a simple input 2"""]
_UpperCAmelCase =("""This is a simple input""", """This is a pair""")
_UpperCAmelCase =[
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.batch_encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.encode(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.batch_encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
_UpperCAmelCase =None # Hotfixing padding = None
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" )
# Simple input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="max_length" , )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.get_rust_tokenizer()
_UpperCAmelCase =load_dataset("xnli" , "all_languages" , split="test" , streaming=lowerCamelCase_ )
_UpperCAmelCase =next(iter(lowerCamelCase_ ) )["""premise"""] # pick up one data
_UpperCAmelCase =list(sample_data.values() )
_UpperCAmelCase =list(map(tokenizer.encode , lowerCamelCase_ ) )
_UpperCAmelCase =[tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ ) for x in output_tokens]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( self ):
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 713 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case =MODEL_FOR_MASKED_LM_MAPPING
snake_case =TF_MODEL_FOR_MASKED_LM_MAPPING
def SCREAMING_SNAKE_CASE ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase =unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_snake_case , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1E-0_5, "token": 3_8015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1E-0_5, "token": 2_5506, "token_str": " accuser"},
] , )
_UpperCAmelCase =unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_snake_case , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1E-0_5,
"token": 3_8015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1E-0_5,
"token": 2_5506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase =unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_snake_case , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2E-0_5, "token": 1_3606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2E-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9E-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase =unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_snake_case , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2E-0_5, "token": 3_5676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2E-0_5, "token": 1_6416, "token_str": "ELS"},
] , )
_UpperCAmelCase =unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_snake_case , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2E-0_5,
"token": 3_5676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2E-0_5, "token": 1_6416, "token_str": "ELS"},
] , )
_UpperCAmelCase =unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_snake_case , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1E-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2E-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2E-0_5, "token": 1_3606, "token_str": " Clara"},
] , )
_UpperCAmelCase =unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=6 ) , [
[
{
"score": 2.2E-0_5,
"token": 3_5676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2E-0_5, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2E-0_5,
"token": 3_5676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2E-0_5, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase =pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(_snake_case , _snake_case )
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(_snake_case )
@slow
@require_tf
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(_snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
_UpperCAmelCase =unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(_snake_case ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase =unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_snake_case ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 1_2790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase =unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_snake_case ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 1_3606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase =None
_UpperCAmelCase =None
self.run_pipeline_test(_snake_case , [] )
@require_tf
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase =None
_UpperCAmelCase =None
self.run_pipeline_test(_snake_case , [] )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase =FillMaskPipeline(model=_snake_case , tokenizer=_snake_case )
_UpperCAmelCase =[
F"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
_UpperCAmelCase =fill_masker.tokenizer
_UpperCAmelCase =fill_masker.model
_UpperCAmelCase =fill_masker(
F"This is a {tokenizer.mask_token}" , )
self.assertEqual(
_snake_case , [
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
] , )
_UpperCAmelCase =fill_masker([F"This is a {tokenizer.mask_token}"] )
self.assertEqual(
_snake_case , [
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
] , )
_UpperCAmelCase =fill_masker([F"This is a {tokenizer.mask_token}", F"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
_snake_case , [
[
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
],
[
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
],
] , )
with self.assertRaises(_snake_case ):
fill_masker([None] )
        # An input that contains no mask token is not supported
with self.assertRaises(_snake_case ):
fill_masker("This is" )
self.run_test_top_k(_snake_case , _snake_case )
self.run_test_targets(_snake_case , _snake_case )
self.run_test_top_k_targets(_snake_case , _snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(_snake_case , _snake_case )
self.fill_mask_with_multiple_masks(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
_UpperCAmelCase =tokenizer.get_vocab()
_UpperCAmelCase =sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase =FillMaskPipeline(model=_snake_case , tokenizer=_snake_case , targets=_snake_case )
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" )
self.assertEqual(
_snake_case , [
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
] , )
_UpperCAmelCase ={vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _snake_case )
_UpperCAmelCase =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_snake_case ) )
# Call argument
_UpperCAmelCase =FillMaskPipeline(model=_snake_case , tokenizer=_snake_case )
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" , targets=_snake_case )
self.assertEqual(
_snake_case , [
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
] , )
_UpperCAmelCase ={vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _snake_case )
_UpperCAmelCase =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_snake_case ) )
# Score equivalence
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" , targets=_snake_case )
_UpperCAmelCase =[top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase =[top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_snake_case ) == set(_snake_case ):
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" , targets=_snake_case )
_UpperCAmelCase =[top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_snake_case ) , nested_simplify(_snake_case ) )
# Raises with invalid
with self.assertRaises(_snake_case ):
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_snake_case ):
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(_snake_case ):
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" , targets="" )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
_UpperCAmelCase =FillMaskPipeline(model=_snake_case , tokenizer=_snake_case , top_k=2 )
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" )
self.assertEqual(
_snake_case , [
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
] , )
_UpperCAmelCase =FillMaskPipeline(model=_snake_case , tokenizer=_snake_case )
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
_snake_case , [
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
] , )
self.assertEqual(nested_simplify(_snake_case ) , nested_simplify(_snake_case ) )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
_UpperCAmelCase =tokenizer.get_vocab()
_UpperCAmelCase =FillMaskPipeline(model=_snake_case , tokenizer=_snake_case )
# top_k=2, ntargets=3
_UpperCAmelCase =sorted(vocab.keys() )[:3]
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" , top_k=2 , targets=_snake_case )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        _UpperCAmelCase =[el["token_str"] for el in sorted(_snake_case , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_snake_case ).issubset(_snake_case ):
_UpperCAmelCase =fill_masker(F"This is a {tokenizer.mask_token}" , top_k=3 , targets=_snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_snake_case ) , nested_simplify(_snake_case ) )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
_UpperCAmelCase =FillMaskPipeline(model=_snake_case , tokenizer=_snake_case )
_UpperCAmelCase =tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase =sorted(vocab.keys() )[:3]
_UpperCAmelCase =[targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase =fill_masker(F"My name is {tokenizer.mask_token}" , targets=_snake_case , top_k=10 )
        # The target list contains duplicates, so the pipeline cannot return more
        # predictions than the number of unique targets
self.assertEqual(len(_snake_case ) , 3 )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ):
_UpperCAmelCase =FillMaskPipeline(model=_snake_case , tokenizer=_snake_case )
_UpperCAmelCase =fill_masker(
F"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
_snake_case , [
[
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
],
[
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
],
[
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
{"sequence": ANY(_snake_case ), "score": ANY(_snake_case ), "token": ANY(_snake_case ), "token_str": ANY(_snake_case )},
],
] , )
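# Illustrative sketch (comment only, outside the unittest harness): the same tiny checkpoint
# exercised in the tests above can be driven directly, e.g.
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#     unmasker("My name is <mask>")                        # two highest-scoring completions
#     unmasker("My name is <mask>", targets=[" Patrick"])  # restrict scoring to the given targets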
| 592 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
lowercase = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
lowercase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase = transform(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE )
return image
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
if "visual_encoder" in key:
lowercase = re.sub('visual_encoder*' , 'vision_model.encoder' , __SCREAMING_SNAKE_CASE )
if "blocks" in key:
lowercase = re.sub(r'blocks' , 'layers' , __SCREAMING_SNAKE_CASE )
if "attn" in key:
lowercase = re.sub(r'attn' , 'self_attn' , __SCREAMING_SNAKE_CASE )
if "norm1" in key:
lowercase = re.sub(r'norm1' , 'layer_norm1' , __SCREAMING_SNAKE_CASE )
if "norm2" in key:
lowercase = re.sub(r'norm2' , 'layer_norm2' , __SCREAMING_SNAKE_CASE )
if "encoder.norm" in key:
lowercase = re.sub(r'encoder.norm' , 'post_layernorm' , __SCREAMING_SNAKE_CASE )
if "encoder.patch_embed.proj" in key:
lowercase = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , __SCREAMING_SNAKE_CASE )
if "encoder.pos_embed" in key:
lowercase = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , __SCREAMING_SNAKE_CASE )
if "encoder.cls_token" in key:
lowercase = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , __SCREAMING_SNAKE_CASE )
if "self_attn" in key:
lowercase = re.sub(r'self_attn.proj' , 'self_attn.projection' , __SCREAMING_SNAKE_CASE )
return key
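# Illustrative note (comment only): the substitutions above are meant to be applied to `key`
# in sequence, so a BLIP weight name such as
#   "visual_encoder.blocks.0.attn.qkv.weight"
# maps to the Hugging Face name
#   "vision_model.encoder.layers.0.self_attn.qkv.weight"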
@torch.no_grad()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
if config_path is not None:
lowercase = BlipConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
lowercase = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase = BlipForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval()
lowercase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
lowercase = blip_decoder(pretrained=__SCREAMING_SNAKE_CASE , image_size=384 , vit='base' )
lowercase = pt_model.eval()
lowercase = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
lowercase = rename_key(__SCREAMING_SNAKE_CASE )
lowercase = value
hf_model.load_state_dict(__SCREAMING_SNAKE_CASE )
lowercase = 384
lowercase = load_demo_image(image_size=__SCREAMING_SNAKE_CASE , device='cpu' )
lowercase = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase = tokenizer(['a picture of'] ).input_ids
lowercase = hf_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase = hf_model.generate(__SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
lowercase = blip_vqa(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit='base' )
vqa_model.eval()
lowercase = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
lowercase = rename_key(__SCREAMING_SNAKE_CASE )
lowercase = value
lowercase = BlipForQuestionAnswering(__SCREAMING_SNAKE_CASE )
hf_vqa_model.load_state_dict(__SCREAMING_SNAKE_CASE )
lowercase = ['How many dogs are in this image?']
lowercase = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_ids
lowercase = hf_vqa_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
lowercase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
lowercase = blip_itm(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit='base' )
itm_model.eval()
lowercase = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
lowercase = rename_key(__SCREAMING_SNAKE_CASE )
lowercase = value
lowercase = BlipForImageTextRetrieval(__SCREAMING_SNAKE_CASE )
lowercase = ['A picture of a woman with a dog sitting in a beach']
lowercase = tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors='pt' , padding='max_length' , truncation=__SCREAMING_SNAKE_CASE , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(__SCREAMING_SNAKE_CASE )
hf_itm_model.eval()
lowercase = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE )
lowercase = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE )
 assert out[0].item() == 0.2110687494277954
 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
UpperCAmelCase = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 84 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__a = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
__a = 10
__a = 256
def _UpperCamelCase ( lowerCAmelCase_ ) ->Optional[MinHash]:
if len(lowerCAmelCase_ ) < MIN_NUM_TOKENS:
return None
UpperCAmelCase = MinHash(num_perm=lowerCAmelCase_ )
for token in set(lowerCAmelCase_ ):
min_hash.update(token.encode() )
return min_hash
def _UpperCamelCase ( lowerCAmelCase_ ) ->Set[str]:
return {t for t in NON_ALPHA.split(lowerCAmelCase_ ) if len(t.strip() ) > 0}
class __lowercase :
def __init__( self : List[str] , *,
__lowerCamelCase : float = 0.85 , ) -> Any:
"""simple docstring"""
UpperCAmelCase = duplication_jaccard_threshold
UpperCAmelCase = NUM_PERM
UpperCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
UpperCAmelCase = defaultdict(__lowerCamelCase )
def _lowercase ( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : MinHash ) -> None:
"""simple docstring"""
UpperCAmelCase = self._index.query(__lowerCamelCase )
if code_key in self._index.keys:
print(F"""Duplicate key {code_key}""" )
return
self._index.insert(__lowerCamelCase , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__lowerCamelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__lowerCamelCase )
def _lowercase ( self : Union[str, Any] ) -> List[List[Dict]]:
"""simple docstring"""
UpperCAmelCase = []
for base, duplicates in self._duplicate_clusters.items():
UpperCAmelCase = [base] + list(__lowerCamelCase )
# reformat the cluster to be a list of dict
UpperCAmelCase = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(__lowerCamelCase )
return duplicate_clusters
def _lowercase ( self : Tuple , __lowerCamelCase : Optional[int] ) -> None:
"""simple docstring"""
UpperCAmelCase = self.get_duplicate_clusters()
with open(__lowerCamelCase , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
def _UpperCamelCase ( lowerCAmelCase_ ) ->Tuple:
UpperCAmelCase , UpperCAmelCase = element
UpperCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _UpperCamelCase ( lowerCAmelCase_ ) ->int:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCAmelCase_ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Any:
UpperCAmelCase = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase_ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase_ ) ) , max_queue_size=1_0_0 ) ):
di.add(lowerCAmelCase_ , lowerCAmelCase_ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->float:
UpperCAmelCase = get_tokens(lowerCAmelCase_ )
UpperCAmelCase = get_tokens(lowerCAmelCase_ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
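# Worked example for the similarity above: the token sets {"a", "b", "c"} and {"b", "c", "d"}
# share 2 tokens and cover 4 distinct tokens in total, so the Jaccard similarity is 2 / 4 = 0.5.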
__a = None
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Dict:
UpperCAmelCase = []
for elementa in cluster:
UpperCAmelCase = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
UpperCAmelCase = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(lowerCAmelCase_ , lowerCAmelCase_ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
UpperCAmelCase = 1
extremes.append(lowerCAmelCase_ )
return extremes
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ->Optional[int]:
global _shared_dataset
UpperCAmelCase = dataset
UpperCAmelCase = []
UpperCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase_ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCAmelCase_ , lowerCAmelCase_ , ) , total=len(lowerCAmelCase_ ) , ):
extremes_list.append(lowerCAmelCase_ )
return extremes_list
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ = 0.85 ) ->Tuple[Type[Dataset], List[List[Dict]]]:
UpperCAmelCase = make_duplicate_clusters(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
UpperCAmelCase = {}
UpperCAmelCase = find_extremes(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for extremes in extremes_clusters:
for element in extremes:
UpperCAmelCase = element
UpperCAmelCase = duplicate_indices - set(extreme_dict.keys() )
    UpperCAmelCase = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
UpperCAmelCase = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
UpperCAmelCase = extreme_dict[element["""base_index"""]]["""copies"""]
print(F"""Original dataset size: {len(lowerCAmelCase_ )}""" )
print(F"""Number of duplicate clusters: {len(lowerCAmelCase_ )}""" )
print(F"""Files in duplicate cluster: {len(lowerCAmelCase_ )}""" )
print(F"""Unique files in duplicate cluster: {len(lowerCAmelCase_ )}""" )
print(F"""Filtered dataset size: {len(lowerCAmelCase_ )}""" )
return ds_filter, duplicate_clusters
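# Minimal usage sketch (comment only): the deduplication entry point defined last above expects a
# `datasets.Dataset` whose rows carry "content", "repo_name" and "path" columns. Assuming it is
# exposed under the name `deduplicate_dataset`, a call could look like
#
#     ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#
# where `ds_filter` is the dataset with near-duplicates removed and `duplicate_clusters` lists the
# clusters together with the kept "extreme" representatives.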
| 377 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["CLIPFeatureExtractor"]
SCREAMING_SNAKE_CASE__ = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
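    # For the inputs above, person 0 can do tasks {1, 3, 4}, person 1 {1, 2, 5} and person 2
    # {3, 4}; counting assignments in which every person receives a distinct task gives 10,
    # so the script prints 10.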
| 140 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Dict = '▁'
__UpperCAmelCase : Optional[Any] = {'vocab_file': 'spiece.model'}
__UpperCAmelCase : Optional[int] = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
__UpperCAmelCase : List[str] = {
'google/pegasus-xsum': 512,
}
__UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase : Dict = VOCAB_FILES_NAMES
UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Any = ['input_ids', 'attention_mask']
def __init__( self : str , __snake_case : Any , __snake_case : Dict="<pad>" , __snake_case : Optional[Any]="</s>" , __snake_case : Optional[int]="<unk>" , __snake_case : int="<mask_2>" , __snake_case : Union[str, Any]="<mask_1>" , __snake_case : List[str]=None , __snake_case : Tuple=103 , __snake_case : Dict = None , **__snake_case : int , ) -> List[str]:
_a : Dict = offset
if additional_special_tokens is not None:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError(
f"""additional_special_tokens should be of type {type(__lowerCamelCase )}, but is"""
f""" {type(__lowerCamelCase )}""" )
_a : Dict = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(__lowerCamelCase ) , self.offset - 1 )
]
if len(set(__lowerCamelCase ) ) != len(__lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
_a : Tuple = additional_special_tokens_extended
else:
_a : Any = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
_a : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , mask_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token_sent=__lowerCamelCase , offset=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_a : List[Any] = mask_token_sent
_a : Dict = vocab_file
_a : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
# add special tokens to encoder dict
_a : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
_a : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def snake_case_ ( self : str ) -> int:
return len(self.sp_model ) + self.offset
def snake_case_ ( self : Dict ) -> List[str]:
_a : Optional[Any] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) -> Dict:
_a : int = self.__dict__.copy()
_a : Union[str, Any] = None
return state
def __setstate__( self : Union[str, Any] , __snake_case : int ) -> List[str]:
_a : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a : Tuple = {}
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self : List[Any] , __snake_case : int ) -> str:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def snake_case_ ( self : List[Any] , __snake_case : List[Any] ) -> Union[str, Any]:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
_a : Any = self.sp_model.piece_to_id(__lowerCamelCase )
return sp_id + self.offset
def snake_case_ ( self : Optional[int] , __snake_case : Optional[int] ) -> Tuple:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
_a : Optional[Any] = self.sp_model.IdToPiece(index - self.offset )
return token
def snake_case_ ( self : Tuple , __snake_case : Tuple ) -> List[str]:
_a : int = []
_a : Optional[Any] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
_a : Dict = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def snake_case_ ( self : Tuple , __snake_case : Union[str, Any]=False ) -> str:
return 1
def snake_case_ ( self : Dict , __snake_case : Union[str, Any] ) -> int:
_a : int = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def snake_case_ ( self : List[str] , __snake_case : str , __snake_case : str = None , __snake_case : Union[str, Any] = False ) -> Optional[int]:
if already_has_special_tokens:
return self._special_token_mask(__lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(__lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def snake_case_ ( self : Any , __snake_case : Dict , __snake_case : Any=None ) -> Tuple:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case_ ( self : Tuple , __snake_case : Dict , __snake_case : Optional[Any] = None ) -> int:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Optional[int] = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , '''wb''' ) as fi:
_a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
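# Illustrative sketch (comment only): upstream this class is exposed as `PegasusTokenizer`
# (a placeholder class name is used in this file), so typical usage would be
#
#     tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#     ids = tok("The quick brown fox.").input_ids  # the special-token logic above appends the eos id
#
# SentencePiece piece ids are shifted by `offset` so that the low ids stay reserved for
# pad/eos (and the mask tokens), as set up in the encoder dict above.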
| 471 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """unispeech"""
def __init__( self , __lowerCamelCase=32 , __lowerCamelCase=768 , __lowerCamelCase=12 , __lowerCamelCase=12 , __lowerCamelCase=3072 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase="group" , __lowerCamelCase="gelu" , __lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , __lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , __lowerCamelCase=False , __lowerCamelCase=128 , __lowerCamelCase=16 , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_5 , __lowerCamelCase=10 , __lowerCamelCase=2 , __lowerCamelCase=0.0 , __lowerCamelCase=10 , __lowerCamelCase=0 , __lowerCamelCase=320 , __lowerCamelCase=2 , __lowerCamelCase=0.1 , __lowerCamelCase=100 , __lowerCamelCase=256 , __lowerCamelCase=256 , __lowerCamelCase=0.1 , __lowerCamelCase="mean" , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=256 , __lowerCamelCase=80 , __lowerCamelCase=0 , __lowerCamelCase=1 , __lowerCamelCase=2 , __lowerCamelCase=0.5 , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase )
__A : Dict = hidden_size
__A : Dict = feat_extract_norm
__A : int = feat_extract_activation
__A : Dict = list(__lowerCamelCase )
__A : str = list(__lowerCamelCase )
__A : Dict = list(__lowerCamelCase )
__A : str = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : int = num_conv_pos_embedding_groups
__A : Union[str, Any] = len(self.conv_dim )
__A : List[str] = num_hidden_layers
__A : Dict = intermediate_size
__A : List[str] = hidden_act
__A : int = num_attention_heads
__A : str = hidden_dropout
__A : str = attention_dropout
__A : Optional[Any] = activation_dropout
__A : Optional[int] = feat_proj_dropout
__A : Optional[Any] = final_dropout
__A : List[Any] = layerdrop
__A : Any = layer_norm_eps
__A : str = initializer_range
__A : List[str] = num_ctc_classes
__A : Dict = vocab_size
__A : Dict = do_stable_layer_norm
__A : Union[str, Any] = use_weighted_layer_sum
__A : Tuple = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : List[Any] = apply_spec_augment
__A : str = mask_time_prob
__A : Dict = mask_time_length
__A : List[str] = mask_time_min_masks
__A : Union[str, Any] = mask_feature_prob
__A : Tuple = mask_feature_length
__A : Any = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : int = num_codevectors_per_group
__A : Any = num_codevector_groups
__A : Optional[Any] = contrastive_logits_temperature
__A : Dict = feat_quantizer_dropout
__A : Tuple = num_negatives
__A : List[str] = codevector_dim
__A : str = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : Tuple = ctc_loss_reduction
__A : Optional[int] = ctc_zero_infinity
# pretraining loss
__A : str = replace_prob
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
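    # Worked example for the property above: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2)
    # the product is 5 * 2**6 = 320, i.e. the feature extractor produces one frame per 320 input
    # samples (20 ms at a 16 kHz sampling rate).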
| 177 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowercase : str = '''CompVis/stable-diffusion-v1-1'''
lowercase : Union[str, Any] = '''CompVis/stable-diffusion-v1-2'''
lowercase : Any = '''CompVis/stable-diffusion-v1-3'''
lowercase : Tuple = '''CompVis/stable-diffusion-v1-4'''
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ) -> Any:
        super().__init__()
snake_case_ : List[Any] = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : int = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = StableDiffusionPipeline(
vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , requires_safety_checker=_SCREAMING_SNAKE_CASE , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _lowerCAmelCase ( self ) -> str:
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE = "auto" ) -> List[str]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> List[Any]:
self.enable_attention_slicing(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> str:
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> int:
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Any:
snake_case_ : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(_SCREAMING_SNAKE_CASE )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
snake_case_ : Optional[Any] = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.2
snake_case_ : Dict = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.3
snake_case_ : str = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.4
snake_case_ : List[str] = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
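    # Note on the __call__ above: it runs the same prompt through the v1.1, v1.2, v1.3 and v1.4
    # checkpoints declared at the top of this file and returns a StableDiffusionPipelineOutput
    # whose `images` list holds one result per checkpoint, in that order.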
| 703 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowercase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : List[str] = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if len(_SCREAMING_SNAKE_CASE ) == 0 or len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(_SCREAMING_SNAKE_CASE ) )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : List[str] = [sequences]
snake_case_ : List[str] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(_SCREAMING_SNAKE_CASE )] for label in labels] )
return sequence_pairs, sequences
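# Worked example for the handler above (comment only): the label-parsing helper turns
# "politics, economics" into ["politics", "economics"], and calling the handler with that label
# list, the sequence "Who are you voting for in 2020?" and the default template
# "This example is {}." yields
#     sequence_pairs = [["Who are you voting for in 2020?", "This example is politics."],
#                       ["Who are you voting for in 2020?", "This example is economics."]]
#     sequences      = ["Who are you voting for in 2020?"]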
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE=ZeroShotClassificationArgumentHandler() , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ : Dict = args_parser
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def _lowerCAmelCase ( self ) -> str:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=TruncationStrategy.ONLY_FIRST , **_SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ : List[Any] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
snake_case_ : List[str] = self.tokenizer.eos_token
try:
snake_case_ : Union[str, Any] = self.tokenizer(
_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , )
except Exception as e:
if "too short" in str(_SCREAMING_SNAKE_CASE ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
snake_case_ : Union[str, Any] = self.tokenizer(
_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowerCAmelCase ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
if kwargs.get("multi_class" , _SCREAMING_SNAKE_CASE ) is not None:
snake_case_ : Any = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
snake_case_ : Any = {}
if "candidate_labels" in kwargs:
snake_case_ : Tuple = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
snake_case_ : Optional[Any] = kwargs["hypothesis_template"]
snake_case_ : Dict = {}
if "multi_label" in kwargs:
snake_case_ : List[Any] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) -> str:
if len(_SCREAMING_SNAKE_CASE ) == 0:
pass
elif len(_SCREAMING_SNAKE_CASE ) == 1 and "candidate_labels" not in kwargs:
snake_case_ : int = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="This example is {}." ) -> str:
snake_case_ , snake_case_ : Optional[int] = self._args_parser(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i, (candidate_label, sequence_pair) in enumerate(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
snake_case_ : str = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(_SCREAMING_SNAKE_CASE ) - 1,
**model_input,
}
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ : Optional[Any] = inputs["candidate_label"]
snake_case_ : Dict = inputs["sequence"]
snake_case_ : Optional[int] = {k: inputs[k] for k in self.tokenizer.model_input_names}
snake_case_ : Dict = self.model(**_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any:
snake_case_ : str = [outputs["candidate_label"] for outputs in model_outputs]
snake_case_ : Union[str, Any] = [outputs["sequence"] for outputs in model_outputs]
snake_case_ : List[Any] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
snake_case_ : Tuple = logits.shape[0]
snake_case_ : List[Any] = len(_SCREAMING_SNAKE_CASE )
snake_case_ : int = N // n
snake_case_ : Optional[Any] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(_SCREAMING_SNAKE_CASE ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
snake_case_ : Any = self.entailment_id
snake_case_ : List[str] = -1 if entailment_id == 0 else 0
snake_case_ : Dict = reshaped_outputs[..., [contradiction_id, entailment_id]]
snake_case_ : List[str] = np.exp(_SCREAMING_SNAKE_CASE ) / np.exp(_SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
snake_case_ : List[str] = reshaped_outputs[..., self.entailment_id]
snake_case_ : Optional[Any] = np.exp(_SCREAMING_SNAKE_CASE ) / np.exp(_SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=_SCREAMING_SNAKE_CASE )
snake_case_ : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
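# A self-contained sketch (fabricated logits, assumed MNLI-style label order) of the
# multi-label scoring used above: softmax over the [contradiction, entailment] logits for
# each candidate label independently, then keep the entailment probability.
if __name__ == "__main__":
    import numpy as np

    demo_logits = np.array([[[2.0, 0.1, 1.5], [0.3, 0.2, 2.5]]])  # (num_sequences, num_labels, 3)
    entailment_id, contradiction_id = 2, 0  # assumed label ordering, for illustration only
    pair = demo_logits[..., [contradiction_id, entailment_id]]
    scores = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
    print(scores[..., 1])  # probability of entailment per candidate label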
| 114 | 0 |
from sklearn.metrics import recall_score
import datasets
snake_case = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
snake_case = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
snake_case = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] ,)
def __UpperCAmelCase ( self : Optional[Any] ,__A : Union[str, Any] ,__A : Tuple ,__A : Tuple=None ,__A : List[str]=1 ,__A : List[str]="binary" ,__A : Optional[int]=None ,__A : str="warn" ,) -> Optional[Any]:
_lowercase = recall_score(
__A ,__A ,labels=__A ,pos_label=__A ,average=__A ,sample_weight=__A ,zero_division=__A ,)
        return {"recall": float(__A ) if score.size == 1 else score}
 | 67 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __lowercase :
"""simple docstring"""
_A : float
_A : TreeNode | None = None
_A : TreeNode | None = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : TreeNode | None ):
"""simple docstring"""
def is_valid_tree(SCREAMING_SNAKE_CASE__ : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
SCREAMING_SNAKE_CASE__ : TreeNode | None , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , SCREAMING_SNAKE_CASE__ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , SCREAMING_SNAKE_CASE__ )
)
return is_binary_search_tree_recursive_check(SCREAMING_SNAKE_CASE__ , -float("""inf""" ) , float("""inf""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
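# A self-contained illustration (independent of the obfuscated names above) of the same
# bounds-based check: a tree is a binary search tree iff every node's value lies strictly
# between the bounds inherited from its ancestors. Demo names are hypothetical.
if __name__ == "__main__":

    @dataclass
    class _DemoNode:
        data: float
        left: "_DemoNode | None" = None
        right: "_DemoNode | None" = None

    def _demo_is_bst(node, low=float("-inf"), high=float("inf")) -> bool:
        if node is None:
            return True
        return (
            low < node.data < high
            and _demo_is_bst(node.left, low, node.data)
            and _demo_is_bst(node.right, node.data, high)
        )

    assert _demo_is_bst(_DemoNode(2.0, _DemoNode(1.0), _DemoNode(3.0)))
    assert not _demo_is_bst(_DemoNode(2.0, _DemoNode(3.0), _DemoNode(1.0)))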
| 480 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Union[str, Any] = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
UpperCamelCase : Tuple = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
a : int = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
a : int = int(re.match(R'.*layer_(\d*).*' , snake_case )[1] )
layer_number -= 3
return F"""h.{layer_number}.""" + key
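# A small standalone check (with a hypothetical Megatron key name) of the renaming rule
# above: the first three `layer_<k>` entries hold embeddings/norms, so transformer block
# `layer_<k>` maps to the `h.<k-3>.` prefix used by transformers.
if __name__ == "__main__":
    import re as _re

    _demo_key = "layer_04-model_00-model_states.weight"
    _demo_layer = int(_re.match(r".*layer_(\d*).*", _demo_key)[1]) - 3
    assert f"h.{_demo_layer}." == "h.1."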
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] ) -> Any:
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
a : List[Any] = re.search(R'[^\d](\d+)$' , str(snake_case ) )
if bit_search is None:
raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" )
a : Optional[int] = int(bit_search.groups()[0] )
return bit_size // 8
def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : Dict , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int ) -> List[str]:
"""simple docstring"""
# Construct model
if bloom_config_file == "":
a : Optional[int] = BloomConfig()
else:
a : Union[str, Any] = BloomConfig.from_json_file(snake_case )
if shard_model:
a : Optional[Any] = os.listdir(snake_case )
a : str = sorted(filter(lambda snake_case : s.startswith('layer' ) and "model_00" in s , snake_case ) )
a : List[str] = {'weight_map': {}, 'metadata': {}}
a : int = 0
a : List[Any] = None
a : str = BloomConfig()
for j, file in enumerate(snake_case ):
print('Processing file: {}'.format(snake_case ) )
a : Union[str, Any] = None
for i in range(snake_case ):
# load all TP files
a : List[str] = file.replace('model_00' , F"""model_0{i}""" )
a : Dict = torch.load(os.path.join(snake_case , snake_case ) , map_location='cpu' )
# Rename keys in the transformers names
a : Dict = list(temp.keys() )
for key in keys:
a : List[Any] = temp.pop(snake_case )
if tensors is None:
a : Optional[Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(snake_case ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
a : List[Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
a : Tuple = torch.cat([tensors[key], temp[key]] , dim=snake_case )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
a : Dict = tensors[key] / pretraining_tp
torch.save(
snake_case , os.path.join(
snake_case , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(snake_case ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
a : Optional[Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
a : Union[str, Any] = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(snake_case ) ).zfill(5 ) )
a : Any = BloomConfig()
a : Dict = pytorch_dump_folder_path + '/' + CONFIG_NAME
a : List[str] = total_size
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(snake_case , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
a : Optional[int] = json.dumps(snake_case , indent=2 , sort_keys=snake_case ) + '\n'
f.write(snake_case )
else:
a : str = BloomModel(snake_case )
a : Optional[Any] = os.listdir(snake_case )
a : Optional[Any] = sorted(filter(lambda snake_case : s.startswith('layer' ) and "model_00" in s , snake_case ) )
a : Optional[int] = None
for i, file in enumerate(snake_case ):
a : Tuple = None
for i in range(snake_case ):
# load all TP files
a : Dict = file.replace('model_00' , F"""model_0{i}""" )
a : Optional[Any] = torch.load(os.path.join(snake_case , snake_case ) , map_location='cpu' )
# Rename keys in the transformers names
a : Optional[int] = list(temp.keys() )
for key in keys:
a : Optional[int] = temp.pop(snake_case )
if tensors is None:
a : Optional[Any] = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(snake_case ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
a : str = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
a : List[Any] = torch.cat([tensors[key], temp[key]] , dim=snake_case )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
a : Optional[Any] = tensors[key] / pretraining_tp
a : str = model.load_state_dict(snake_case , strict=snake_case )
assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
a : Any = set(other_keys.missing_keys )
else:
a : str = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(snake_case , exist_ok=snake_case )
a : List[str] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
a : Optional[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
a : List[Any] = model.to(config.torch_dtype )
torch.save(model.state_dict() , snake_case )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
UpperCamelCase : List[str] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
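# A minimal sketch (invented tensor names and sizes) of the index file the sharded branch
# above writes next to the shards: a `weight_map` from parameter name to shard file plus
# `metadata.total_size` accumulated from numel * bytes-per-dtype.
if __name__ == "__main__":
    import json as _json

    _demo_index = {
        "metadata": {"total_size": 4 * 2},  # e.g. 4 fp16 values at 2 bytes each
        "weight_map": {"h.0.mlp.dense_4h_to_h.weight": "pytorch_model_00001-of-00002.bin"},
    }
    print(_json.dumps(_demo_index, indent=2, sort_keys=True))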
| 701 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
UpperCamelCase : Dict = logging.get_logger(__name__)
UpperCamelCase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase : str = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
UpperCamelCase : List[Any] = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase : List[str] = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
UpperCamelCase : List[Any] = {f'''funnel-transformer/{name}''': {"""do_lower_case""": True} for name in _model_names}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_INIT_CONFIGURATION
A : Optional[Any] = FunnelTokenizer
A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = 2
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Union[str, Any]="<sep>" , UpperCAmelCase_ : Union[str, Any]="<pad>" , UpperCAmelCase_ : Tuple="<cls>" , UpperCAmelCase_ : List[str]="<mask>" , UpperCAmelCase_ : Dict="<s>" , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[int]="##" , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , clean_text=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , wordpieces_prefix=UpperCAmelCase_ , **UpperCAmelCase_ , )
a : str = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' , UpperCAmelCase_) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase_) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase_) != tokenize_chinese_chars
):
a : Optional[Any] = getattr(UpperCAmelCase_ , normalizer_state.pop('type'))
a : Any = do_lower_case
a : List[str] = strip_accents
a : List[Any] = tokenize_chinese_chars
a : Dict = normalizer_class(**UpperCAmelCase_)
a : Union[str, Any] = do_lower_case
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : str=None):
"""simple docstring"""
a : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
a : Any = [self.sep_token_id]
a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0]
return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
a : List[str] = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
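# A plain-Python sketch of the token type id construction above: Funnel uses a dedicated
# type id (2, the class attribute declared earlier) for the [CLS] token, then 0 for the
# first segment plus its [SEP] and 1 for the second segment plus its [SEP]. Token counts
# here are illustrative.
if __name__ == "__main__":
    _cls_token_type_id = 2
    _len_a, _len_b = 4, 3  # tokens in each segment, before special tokens
    _type_ids = [_cls_token_type_id] + [0] * (_len_a + 1) + [1] * (_len_b + 1)  # +1 for each [SEP]
    assert len(_type_ids) == 1 + _len_a + 1 + _len_b + 1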
| 610 | 0 |
_lowercase: Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def _lowerCamelCase ( snake_case ):
# Make sure the supplied data is a bytes-like object
if not isinstance(snake_case , snake_case ):
_lowerCAmelCase = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(snake_case )
_lowerCAmelCase = ''.join(bin(snake_case )[2:].zfill(8 ) for byte in data )
_lowerCAmelCase = len(snake_case ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowerCAmelCase = b'=' * ((6 - len(snake_case ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(snake_case ) % 6)
else:
_lowerCAmelCase = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(snake_case ) , 6 ) ).encode()
+ padding
)
def _lowerCamelCase ( snake_case ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(snake_case , snake_case ) and not isinstance(snake_case , snake_case ):
_lowerCAmelCase = (
'argument should be a bytes-like object or ASCII string, '
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(snake_case )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(snake_case , snake_case ):
try:
_lowerCAmelCase = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowerCAmelCase = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(snake_case ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowerCAmelCase = encoded_data[:-padding]
_lowerCAmelCase = ''.join(
bin(B64_CHARSET.index(snake_case ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowerCAmelCase = ''.join(
bin(B64_CHARSET.index(snake_case ) )[2:].zfill(6 ) for char in encoded_data )
_lowerCAmelCase = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(snake_case ) , 8 )
]
return bytes(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
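# A quick cross-check against the standard library (not part of the hand-rolled encoder
# and decoder above): `base64.b64encode`/`b64decode` should round-trip the same bytes.
if __name__ == "__main__":
    import base64 as _b64

    _payload = b"Hello, base64!"
    assert _b64.b64decode(_b64.b64encode(_payload)) == _payload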
| 192 |
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28_433 * (pow(2, 7_830_457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
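# A small worked check of the trick used above: the three-argument `pow` computes
# 2**e % m directly, so only the last digits are ever materialised.
if __name__ == "__main__":
    assert pow(2, 20, 10**5) == (2**20) % 10**5 == 48_576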
| 192 | 1 |
"""simple docstring"""
a : int = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """simple docstring"""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b, num_a)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
a : List[str] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
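# A self-contained variant (plain Python lists standing in for the two stacks, no `Stack`
# import needed) that evaluates the same fully parenthesised expression with Dijkstra's rules.
if __name__ == "__main__":
    _ops = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    _operands: list = []
    _operators: list = []
    for _ch in "(5 + ((4 * 2) * (2 + 3)))":
        if _ch.isdigit():
            _operands.append(int(_ch))
        elif _ch in _ops:
            _operators.append(_ch)
        elif _ch == ")":
            _b, _a = _operands.pop(), _operands.pop()
            _operands.append(_ops[_operators.pop()](_a, _b))
    assert _operands[-1] == 45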
| 707 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __a ( unittest.TestCase ):
__UpperCamelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCamelCase : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__UpperCamelCase : List[str] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__UpperCamelCase : Any = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ZeroShotClassificationPipeline(
model=lowerCamelCase ,tokenizer=lowerCamelCase ,candidate_labels=["""polics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = classifier("""Who are you voting for in 2020?""" ,candidate_labels="""politics""" )
self.assertEqual(lowerCamelCase ,{"""sequence""": ANY(lowerCamelCase ), """labels""": [ANY(lowerCamelCase )], """scores""": [ANY(lowerCamelCase )]} )
# No kwarg
__SCREAMING_SNAKE_CASE = classifier("""Who are you voting for in 2020?""" ,["""politics"""] )
self.assertEqual(lowerCamelCase ,{"""sequence""": ANY(lowerCamelCase ), """labels""": [ANY(lowerCamelCase )], """scores""": [ANY(lowerCamelCase )]} )
__SCREAMING_SNAKE_CASE = classifier("""Who are you voting for in 2020?""" ,candidate_labels=["""politics"""] )
self.assertEqual(lowerCamelCase ,{"""sequence""": ANY(lowerCamelCase ), """labels""": [ANY(lowerCamelCase )], """scores""": [ANY(lowerCamelCase )]} )
__SCREAMING_SNAKE_CASE = classifier("""Who are you voting for in 2020?""" ,candidate_labels="""politics, public health""" )
self.assertEqual(
lowerCamelCase ,{"""sequence""": ANY(lowerCamelCase ), """labels""": [ANY(lowerCamelCase ), ANY(lowerCamelCase )], """scores""": [ANY(lowerCamelCase ), ANY(lowerCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) ,1.0 )
__SCREAMING_SNAKE_CASE = classifier("""Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
lowerCamelCase ,{"""sequence""": ANY(lowerCamelCase ), """labels""": [ANY(lowerCamelCase ), ANY(lowerCamelCase )], """scores""": [ANY(lowerCamelCase ), ANY(lowerCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) ,1.0 )
__SCREAMING_SNAKE_CASE = classifier(
"""Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template="""This text is about {}""" )
self.assertEqual(lowerCamelCase ,{"""sequence""": ANY(lowerCamelCase ), """labels""": [ANY(lowerCamelCase )], """scores""": [ANY(lowerCamelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
__SCREAMING_SNAKE_CASE = classifier(["""I am happy"""] ,["""positive""", """negative"""] )
self.assertEqual(
lowerCamelCase ,[
{"""sequence""": ANY(lowerCamelCase ), """labels""": [ANY(lowerCamelCase ), ANY(lowerCamelCase )], """scores""": [ANY(lowerCamelCase ), ANY(lowerCamelCase )]}
for i in range(1 )
] ,)
__SCREAMING_SNAKE_CASE = classifier(["""I am happy""", """I am sad"""] ,["""positive""", """negative"""] )
self.assertEqual(
lowerCamelCase ,[
{"""sequence""": ANY(lowerCamelCase ), """labels""": [ANY(lowerCamelCase ), ANY(lowerCamelCase )], """scores""": [ANY(lowerCamelCase ), ANY(lowerCamelCase )]}
for i in range(2 )
] ,)
with self.assertRaises(lowerCamelCase ):
classifier("""""" ,candidate_labels="""politics""" )
with self.assertRaises(lowerCamelCase ):
classifier(lowerCamelCase ,candidate_labels="""politics""" )
with self.assertRaises(lowerCamelCase ):
classifier("""Who are you voting for in 2020?""" ,candidate_labels="""""" )
with self.assertRaises(lowerCamelCase ):
classifier("""Who are you voting for in 2020?""" ,candidate_labels=lowerCamelCase )
with self.assertRaises(lowerCamelCase ):
classifier(
"""Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template="""Not formatting template""" ,)
with self.assertRaises(lowerCamelCase ):
classifier(
"""Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template=lowerCamelCase ,)
self.run_entailment_id(lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Pipeline ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = zero_shot_classifier.model.config
__SCREAMING_SNAKE_CASE = config.labelaid
__SCREAMING_SNAKE_CASE = zero_shot_classifier.entailment_id
__SCREAMING_SNAKE_CASE = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id ,-1 )
__SCREAMING_SNAKE_CASE = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id ,0 )
__SCREAMING_SNAKE_CASE = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id ,0 )
__SCREAMING_SNAKE_CASE = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id ,2 )
__SCREAMING_SNAKE_CASE = original_labelaid
self.assertEqual(lowerCamelCase ,zero_shot_classifier.entailment_id )
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline(
"""zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""pt""" ,)
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 ,candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline(
"""zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""pt""" ,)
__SCREAMING_SNAKE_CASE = zero_shot_classifier(
"""Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(lowerCamelCase ) ,{
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} ,)
@require_tf
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline(
"""zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""tf""" ,)
__SCREAMING_SNAKE_CASE = zero_shot_classifier(
"""Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(lowerCamelCase ) ,{
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} ,)
@slow
@require_torch
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline("""zero-shot-classification""" ,model="""roberta-large-mnli""" ,framework="""pt""" )
__SCREAMING_SNAKE_CASE = zero_shot_classifier(
"""Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(lowerCamelCase ) ,{
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} ,)
__SCREAMING_SNAKE_CASE = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=lowerCamelCase ,)
self.assertEqual(
nested_simplify(lowerCamelCase ) ,{
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} ,)
@slow
@require_tf
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline("""zero-shot-classification""" ,model="""roberta-large-mnli""" ,framework="""tf""" )
__SCREAMING_SNAKE_CASE = zero_shot_classifier(
"""Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(lowerCamelCase ) ,{
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} ,)
__SCREAMING_SNAKE_CASE = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=lowerCamelCase ,)
self.assertEqual(
nested_simplify(lowerCamelCase ) ,{
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} ,)
| 109 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __a :
def __init__( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple=14 ,lowerCamelCase : Optional[Any]=7 ,lowerCamelCase : str=True ,lowerCamelCase : List[str]=True ,lowerCamelCase : Dict=True ,lowerCamelCase : Any=True ,lowerCamelCase : int=True ,lowerCamelCase : Dict=99 ,lowerCamelCase : Dict=32 ,lowerCamelCase : Optional[Any]=5 ,lowerCamelCase : Tuple=4 ,lowerCamelCase : Optional[int]=37 ,lowerCamelCase : Optional[int]="gelu" ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Dict=512 ,lowerCamelCase : int=16 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=0.02 ,lowerCamelCase : str=3 ,lowerCamelCase : Union[str, Any]=4 ,lowerCamelCase : Any=None ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = use_mc_token_ids
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = self.vocab_size - 1
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_mc_token_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Any ,lowerCamelCase : List[str] ,lowerCamelCase : str ,*lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = CTRLModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
model(lowerCamelCase ,token_type_ids=lowerCamelCase ,head_mask=lowerCamelCase )
model(lowerCamelCase ,token_type_ids=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : int ,lowerCamelCase : Dict ,*lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = CTRLLMHeadModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,token_type_ids=lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : int ,lowerCamelCase : str ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any] ,*lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = CTRLForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,token_type_ids=lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class __a ( _snake_case, _snake_case, _snake_case, unittest.TestCase ):
__UpperCamelCase : str = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__UpperCamelCase : Dict = (CTRLLMHeadModel,) if is_torch_available() else ()
__UpperCamelCase : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase : List[str] = True
__UpperCamelCase : Dict = False
__UpperCamelCase : Tuple = False
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Any ,lowerCamelCase : str ,lowerCamelCase : List[str] ,lowerCamelCase : int ,lowerCamelCase : Dict ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = CTRLModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,n_embd=37 )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = CTRLModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[1_1859, 0, 1611, 8]] ,dtype=torch.long ,device=lowerCamelCase ) # Legal the president is
__SCREAMING_SNAKE_CASE = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ,do_sample=lowerCamelCase )
self.assertListEqual(output_ids[0].tolist() ,lowerCamelCase )
| 109 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : str = logging.get_logger(__name__)
lowerCAmelCase__ : Any = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''rwkv'''
SCREAMING_SNAKE_CASE = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : int , UpperCAmelCase_ : Union[str, Any]=50_277 , UpperCAmelCase_ : str=1_024 , UpperCAmelCase_ : Optional[Any]=4_096 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=1e-5 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : List[str]=6 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]=True , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : List[Any] = context_length
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
__UpperCAmelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size
__UpperCAmelCase : Any = layer_norm_epsilon
__UpperCAmelCase : Optional[Any] = rescale_every
__UpperCAmelCase : Union[str, Any] = use_cache
__UpperCAmelCase : List[Any] = bos_token_id
__UpperCAmelCase : Tuple = eos_token_id
super().__init__(
tie_word_embeddings=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
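# A short usage sketch of the defaults above: `attention_hidden_size` falls back to
# `hidden_size`, and `intermediate_size` to 4 * `hidden_size`. This assumes the public
# `RwkvConfig` class exported by `transformers`, which this file defines under an
# obfuscated name.
if __name__ == "__main__":
    from transformers import RwkvConfig

    _cfg = RwkvConfig(hidden_size=512)
    assert _cfg.attention_hidden_size == 512
    assert _cfg.intermediate_size == 4 * 512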
| 702 |
'''simple docstring'''
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = int((h - i + 1) / 3)
        # Recursively sort the first 2/3 elements
        stooge(arr, i, (h - t))
        # Recursively sort the last 2/3 elements
        stooge(arr, i + t, (h))
        # Recursively sort the first 2/3 elements again
        stooge(arr, i, (h - t))
if __name__ == "__main__":
lowerCAmelCase__ : Any = input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase__ : Any = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 329 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase__ ( unittest.TestCase ):
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = mock.Mock()
SCREAMING_SNAKE_CASE__ = 500
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = HTTPError
SCREAMING_SNAKE_CASE__ = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=snake_case_ ) as mock_head:
SCREAMING_SNAKE_CASE__ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check verifies that the fake head request was indeed called
mock_head.assert_called()
@require_tokenizers
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = mock.Mock()
SCREAMING_SNAKE_CASE__ = 500
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = HTTPError
SCREAMING_SNAKE_CASE__ = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__ = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=snake_case_ ) as mock_head:
SCREAMING_SNAKE_CASE__ = GPTaTokenizerFast.from_pretrained('gpt2' )
            # This check verifies that the fake head request was indeed called
mock_head.assert_called()
def A_ ( self : str ):
try:
SCREAMING_SNAKE_CASE__ = tempfile.mktemp()
with open(snake_case_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , snake_case_ )
SCREAMING_SNAKE_CASE__ = AlbertTokenizer.from_pretrained(snake_case_ )
finally:
os.remove(snake_case_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , snake_case_ )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase__ ( unittest.TestCase ):
A__ : List[str] =["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def A_ ( cls : Optional[int] ):
SCREAMING_SNAKE_CASE__ = TOKEN
HfFolder.save_token(snake_case_ )
@classmethod
def A_ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def A_ ( self : Dict ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = os.path.join(snake_case_ , 'vocab.txt' )
with open(snake_case_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = BertTokenizer(snake_case_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case_ , repo_id='test-tokenizer' , push_to_hub=snake_case_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def A_ ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = os.path.join(snake_case_ , 'vocab.txt' )
with open(snake_case_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = BertTokenizer(snake_case_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=snake_case_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def A_ ( self : int ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = os.path.join(snake_case_ , 'vocab.txt' )
with open(snake_case_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = CustomTokenizer(snake_case_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=snake_case_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = os.path.join(snake_case_ , 'vocab.txt' )
with open(snake_case_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = BertTokenizerFast.from_pretrained(snake_case_ )
bert_tokenizer.save_pretrained(snake_case_ )
SCREAMING_SNAKE_CASE__ = CustomTokenizerFast.from_pretrained(snake_case_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=snake_case_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=snake_case_ , trust_remote_code=snake_case_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowercase__ ( unittest.TestCase ):
def A_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def A_ ( self : int ):
SCREAMING_SNAKE_CASE__ = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = Trie()
SCREAMING_SNAKE_CASE__ = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case_ , ['AB', 'C'] )
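# --- usage sketch (added; not part of the original tests) -----------------------------
# The Trie exercised above can be used on its own to split text on added tokens; this
# mirrors what the assertions check and relies on no external data.
def _trie_split_demo():
    trie = Trie()
    trie.add("[CLS]")
    trie.add("extra_id_100")
    # Expected: ['[CLS]', ' This is a ', 'extra_id_100']
    return trie.split("[CLS] This is a extra_id_100")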
| 472 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
def A ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case_ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(snake_case_ , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(snake_case_ , '''num_encoder_blocks''' ) )
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=6_4 , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[1_6, 3_2, 6_4, 1_2_8] , snake_case_=[1, 4, 8, 1_6] , snake_case_=[1, 2, 4, 8] , snake_case_=True , snake_case_=True , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0_2 , snake_case_=3 , snake_case_=None , ) -> int:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = num_encoder_blocks
__lowercase = sr_ratios
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = downsampling_rates
__lowercase = num_attention_heads
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = scope
def A ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def A ( self ) -> Union[str, Any]:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def A ( self , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
'''simple docstring'''
__lowercase = SegformerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__lowercase = model(snake_case_ )
__lowercase = __lowercase = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def A ( self , snake_case_ , snake_case_ , snake_case_ ) -> str:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = SegformerForSemanticSegmentation(snake_case_ )
model.to(snake_case_ )
model.eval()
__lowercase = model(snake_case_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__lowercase = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def A ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
'''simple docstring'''
__lowercase = 1
__lowercase = SegformerForSemanticSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__lowercase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(snake_case_ )
__lowercase = model(snake_case_ , labels=snake_case_ )
self.parent.assertGreater(result.loss , 0.0 )
def A ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def A ( self ) -> Any:
'''simple docstring'''
__lowercase = SegformerModelTester(self )
__lowercase = SegformerConfigTester(self , config_class=snake_case_ )
def A ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def A ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*snake_case_ )
def A ( self ) -> int:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*snake_case_ )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def A ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def A ( self ) -> str:
'''simple docstring'''
pass
def A ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(snake_case_ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
def A ( self ) -> List[str]:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowercase = outputs.attentions
__lowercase = sum(self.model_tester.depths )
self.assertEqual(len(snake_case_ ) , snake_case_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowercase = outputs.attentions
self.assertEqual(len(snake_case_ ) , snake_case_ )
# verify the first attentions (first block, first layer)
__lowercase = (self.model_tester.image_size // 4) ** 2
__lowercase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__lowercase = (self.model_tester.image_size // 3_2) ** 2
__lowercase = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__lowercase = len(snake_case_ )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 1 , len(snake_case_ ) )
__lowercase = outputs.attentions
self.assertEqual(len(snake_case_ ) , snake_case_ )
# verify the first attentions (first block, first layer)
__lowercase = (self.model_tester.image_size // 4) ** 2
__lowercase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def A ( self ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
__lowercase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowercase = outputs.hidden_states
__lowercase = self.model_tester.num_encoder_blocks
self.assertEqual(len(snake_case_ ) , snake_case_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def A ( self ) -> str:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(snake_case_ ):
continue
__lowercase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
__lowercase = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
__lowercase = model(**snake_case_ ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self ) -> List[Any]:
'''simple docstring'''
pass
@slow
def A ( self ) -> str:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = SegformerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case_ , align=snake_case_ , do_random_crop=snake_case_ )
__lowercase = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
snake_case_ )
__lowercase = prepare_img()
__lowercase = image_processor(images=snake_case_ , return_tensors='''pt''' )
__lowercase = encoded_inputs.pixel_values.to(snake_case_ )
with torch.no_grad():
__lowercase = model(snake_case_ )
__lowercase = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , snake_case_ )
__lowercase = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case_ , atol=1e-4 ) )
@slow
def A ( self ) -> int:
'''simple docstring'''
__lowercase = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case_ , align=snake_case_ , do_random_crop=snake_case_ )
__lowercase = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(snake_case_ )
__lowercase = prepare_img()
__lowercase = image_processor(images=snake_case_ , return_tensors='''pt''' )
__lowercase = encoded_inputs.pixel_values.to(snake_case_ )
with torch.no_grad():
__lowercase = model(snake_case_ )
__lowercase = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , snake_case_ )
__lowercase = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case_ , atol=1e-1 ) )
@slow
def A ( self ) -> int:
'''simple docstring'''
__lowercase = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case_ , align=snake_case_ , do_random_crop=snake_case_ )
__lowercase = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
snake_case_ )
__lowercase = prepare_img()
__lowercase = image_processor(images=snake_case_ , return_tensors='''pt''' )
__lowercase = encoded_inputs.pixel_values.to(snake_case_ )
with torch.no_grad():
__lowercase = model(snake_case_ )
__lowercase = outputs.logits.detach().cpu()
__lowercase = image_processor.post_process_semantic_segmentation(outputs=snake_case_ , target_sizes=[(5_0_0, 3_0_0)] )
__lowercase = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , snake_case_ )
__lowercase = image_processor.post_process_semantic_segmentation(outputs=snake_case_ )
__lowercase = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape , snake_case_ )
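# --- usage sketch (added; not part of the original tests) -----------------------------
# A condensed version of what the integration tests above do. The checkpoint name is the
# one already used in those tests; the target size and helper name are illustrative.
def _segformer_demo():
    image_processor = SegformerImageProcessor()
    model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    inputs = image_processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Resize the per-pixel predictions back to a chosen output size.
    return image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(500, 300)])[0]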
| 639 | 0 |
"""simple docstring"""
import numpy as np
class Cell:
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        # The eight coordinates surrounding a cell (including diagonals).
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 663 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
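# --- usage sketch (added; not part of the original file) ------------------------------
# Assuming the clustering helper above is exposed as `TFKMeansCluster(vectors, noofclusters)`
# (the public name is an assumption; the definition above is obfuscated), a call on a tiny
# toy dataset would look like the following and return (centroids, assignments):
#
#   vectors = array([[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.7, 8.3]])
#   centroids, assignments = TFKMeansCluster(vectors, noofclusters=2)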
| 663 | 1 |
import unittest
from knapsack import knapsack as k
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 1_00, 1_20]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 2_20)
if __name__ == "__main__":
unittest.main()
| 198 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
def __init__( self : int ,lowercase_ : UNetaDModel ,lowercase_ : ScoreSdeVeScheduler ):
super().__init__()
self.register_modules(unet=lowercase_ ,scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : List[str] ,lowercase_ : int = 1 ,lowercase_ : int = 2_0_0_0 ,lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowercase_ : Optional[str] = "pil" ,lowercase_ : bool = True ,**lowercase_ : Dict ,):
lowerCAmelCase__ : str = self.unet.config.sample_size
lowerCAmelCase__ : int = (batch_size, 3, img_size, img_size)
lowerCAmelCase__ : List[Any] = self.unet
lowerCAmelCase__ : Tuple = randn_tensor(lowercase_ ,generator=lowercase_ ) * self.scheduler.init_noise_sigma
lowerCAmelCase__ : List[Any] = sample.to(self.device )
self.scheduler.set_timesteps(lowercase_ )
self.scheduler.set_sigmas(lowercase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCAmelCase__ : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowerCAmelCase__ : str = self.unet(lowercase_ ,lowercase_ ).sample
lowerCAmelCase__ : List[str] = self.scheduler.step_correct(lowercase_ ,lowercase_ ,generator=lowercase_ ).prev_sample
# prediction step
lowerCAmelCase__ : Dict = model(lowercase_ ,lowercase_ ).sample
lowerCAmelCase__ : Optional[int] = self.scheduler.step_pred(lowercase_ ,lowercase_ ,lowercase_ ,generator=lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = output.prev_sample, output.prev_sample_mean
lowerCAmelCase__ : List[str] = sample_mean.clamp(0 ,1 )
lowerCAmelCase__ : Union[str, Any] = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowerCAmelCase__ : Union[str, Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowercase_ )
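# --- usage sketch (added; not part of the original file) ------------------------------
# Assuming the class above is the unconditional score-SDE (VE) image pipeline, it is
# normally paired with a UNet2DModel and a ScoreSdeVeScheduler. The class name and the
# tiny model sizes below are assumptions, for illustration only.
#
#   unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
#   scheduler = ScoreSdeVeScheduler()
#   pipe = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
#   image = pipe(num_inference_steps=10).images[0]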
| 450 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __magic_name__ :
def __init__( self , snake_case_ , snake_case_=14 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
lowercase =parent
lowercase =batch_size
lowercase =seq_length
lowercase =is_training
lowercase =use_token_type_ids
lowercase =use_input_mask
lowercase =use_labels
lowercase =use_mc_token_ids
lowercase =vocab_size
lowercase =hidden_size
lowercase =num_hidden_layers
lowercase =num_attention_heads
lowercase =intermediate_size
lowercase =hidden_act
lowercase =hidden_dropout_prob
lowercase =attention_probs_dropout_prob
lowercase =max_position_embeddings
lowercase =type_vocab_size
lowercase =type_sequence_label_size
lowercase =initializer_range
lowercase =num_labels
lowercase =num_choices
lowercase =scope
lowercase =self.vocab_size - 1
def _A( self ):
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase =None
if self.use_input_mask:
lowercase =random_attention_mask([self.batch_size, self.seq_length] )
lowercase =None
if self.use_token_type_ids:
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase =None
if self.use_mc_token_ids:
lowercase =ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowercase =None
lowercase =None
lowercase =None
if self.use_labels:
lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase =ids_tensor([self.batch_size] , self.num_choices )
lowercase =self.get_config()
lowercase =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _A( self ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ ):
lowercase =CTRLModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ )
model(lowercase_ , token_type_ids=lowercase_ )
lowercase =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ ):
lowercase =CTRLLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase =model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A( self ):
lowercase =self.prepare_config_and_inputs()
(
lowercase
) =config_and_inputs
lowercase ={"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ ):
lowercase =self.num_labels
lowercase =CTRLForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase =model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __magic_name__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
UpperCamelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCamelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCamelCase__ = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _A( self ):
lowercase =CTRLModelTester(self )
lowercase =ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def _A( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _A( self ):
self.config_tester.run_common_tests()
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowercase_ )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _A( self ):
pass
@slow
def _A( self ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase =CTRLModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _A( self ):
pass
@require_torch
class __magic_name__ ( unittest.TestCase ):
def _A( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _A( self ):
lowercase =CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(lowercase_ )
lowercase =torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=lowercase_ ) # Legal the president is
lowercase =[
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowercase =model.generate(lowercase_ , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].tolist() , lowercase_ )
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 145 | 0 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 572 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
A__ = ","
A__ = None
A__ = "infer"
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = True
A__ = None
A__ = None
A__ = None
A__ = None
A__ = False
A__ = None
A__ = None
A__ = None
A__ = True
A__ = True
A__ = False
A__ = True
A__ = None
A__ = "."
A__ = None
A__ = '"'
A__ = 0
A__ = None
A__ = None
A__ = None
A__ = None
A__ = True
A__ = True
A__ = 0
A__ = True
A__ = False
A__ = None
A__ = 10000
A__ = None
A__ = "strict"
A__ = "error"
A__ = None
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
if self.delimiter is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.delimiter
if self.column_names is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.column_names
@property
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , snake_case__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class UpperCamelCase ( datasets.ArrowBasedBuilder ):
A__ = CsvConfig
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_SCREAMING_SNAKE_CASE : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case__ , (str, list, tuple) ):
_SCREAMING_SNAKE_CASE : Optional[Any] = data_files
if isinstance(snake_case__ , snake_case__ ):
_SCREAMING_SNAKE_CASE : Optional[int] = [files]
_SCREAMING_SNAKE_CASE : Tuple = [dl_manager.iter_files(snake_case__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
for split_name, files in data_files.items():
if isinstance(snake_case__ , snake_case__ ):
_SCREAMING_SNAKE_CASE : Optional[int] = [files]
_SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(snake_case__ ) for file in files]
splits.append(datasets.SplitGenerator(name=snake_case__ , gen_kwargs={"files": files} ) )
return splits
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
if self.config.features is not None:
_SCREAMING_SNAKE_CASE : Any = self.config.features.arrow_schema
if all(not require_storage_cast(snake_case__ ) for feature in self.config.features.values() ):
# cheaper cast
_SCREAMING_SNAKE_CASE : Optional[int] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=snake_case__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_SCREAMING_SNAKE_CASE : Union[str, Any] = table_cast(snake_case__ , snake_case__ )
return pa_table
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(snake_case__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case__ ) ):
_SCREAMING_SNAKE_CASE : int = pd.read_csv(snake_case__ , iterator=snake_case__ , dtype=snake_case__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE : Any = pa.Table.from_pandas(snake_case__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(snake_case__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(snake_case__ )}: {e}''' )
raise
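# --- usage sketch (added; not part of the original module) ----------------------------
# The builder above backs the packaged "csv" loader; end users normally reach it through
# `datasets.load_dataset`. The file name below is an illustrative placeholder.
def _load_csv_demo(path: str = "my_file.csv"):
    from datasets import load_dataset

    return load_dataset("csv", data_files=path, sep=",")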
| 572 | 1 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """simple docstring"""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 643 |
def twos_complement(number: int) -> str:
    """simple docstring"""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
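# --- usage sketch (added; not part of the original file) ------------------------------
# Sanity checks for the function above; both values were worked out by hand.
def _twos_complement_demo() -> None:
    assert twos_complement(-1) == "0b11"
    assert twos_complement(-5) == "0b1011"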
| 643 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ ):
_lowercase: str = AutoConfig.from_pretrained(UpperCamelCase__ )
_lowercase: Optional[int] = FlaxAutoModelForSeqaSeqLM.from_config(config=UpperCamelCase__ )
_lowercase: List[str] = checkpoints.load_tax_checkpoint(UpperCamelCase__ )
_lowercase: Any = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
_lowercase: Union[str, Any] = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowercase: List[Any] = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowercase: Optional[Any] = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
_lowercase: int = f"layers_{str(UpperCamelCase__ )}"
# Self-Attention
_lowercase: int = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
_lowercase: List[str] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
_lowercase: Union[str, Any] = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
_lowercase: Dict = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowercase: int = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
_lowercase: str = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
_lowercase: Union[str, Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_lowercase: str = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_lowercase: int = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
_lowercase: Optional[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_lowercase: List[Any] = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_lowercase: List[Any] = flax_model.params["encoder"]["block"][str(UpperCamelCase__ )]["layer"]
_lowercase: Optional[int] = tax_attention_key
_lowercase: str = tax_attention_out
_lowercase: int = tax_attention_query
_lowercase: Optional[Any] = tax_attention_value
_lowercase: List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowercase: Optional[Any] = tax_global_layer_norm
if split_mlp_wi:
_lowercase: int = tax_mlp_wi_a
_lowercase: Union[str, Any] = tax_mlp_wi_a
else:
_lowercase: int = tax_mlp_wi
_lowercase: List[str] = tax_mlp_wo
_lowercase: Optional[Any] = tax_mlp_layer_norm
_lowercase: str = flax_model_encoder_layer_block
# Only for layer 0:
_lowercase: Optional[int] = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
_lowercase: int = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowercase: Any = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
_lowercase: int = tax_encoder_global_rel_embedding
# Assigning
_lowercase: Union[str, Any] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
_lowercase: Optional[int] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowercase: List[Any] = f"layers_{str(UpperCamelCase__ )}"
# Self-Attention
_lowercase: str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
_lowercase: List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
_lowercase: Union[str, Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
_lowercase: str = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
_lowercase: Any = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
_lowercase: str = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
_lowercase: Union[str, Any] = tax_enc_dec_attention_module["key"]["kernel"]
_lowercase: Optional[int] = tax_enc_dec_attention_module["out"]["kernel"]
_lowercase: str = tax_enc_dec_attention_module["query"]["kernel"]
_lowercase: Union[str, Any] = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
_lowercase: List[str] = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
_lowercase: Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_lowercase: Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_lowercase: Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
_lowercase: str = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_lowercase: List[Any] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_lowercase: Tuple = flax_model.params["decoder"]["block"][str(UpperCamelCase__ )]["layer"]
_lowercase: List[str] = tax_attention_key
_lowercase: Tuple = tax_attention_out
_lowercase: Dict = tax_attention_query
_lowercase: int = tax_attention_value
_lowercase: Union[str, Any] = tax_pre_attention_layer_norm
_lowercase: Dict = tax_enc_dec_attention_key
_lowercase: Union[str, Any] = tax_enc_dec_attention_out
_lowercase: Any = tax_enc_dec_attention_query
_lowercase: Tuple = tax_enc_dec_attention_value
_lowercase: Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
_lowercase: Optional[int] = tax_mlp_wi_a
_lowercase: str = tax_mlp_wi_a
else:
_lowercase: Any = tax_mlp_wi
_lowercase: Dict = tax_mlp_wo
_lowercase: Tuple = txa_mlp_layer_norm
_lowercase: Any = flax_model_decoder_layer_block
# Decoder Normalization
_lowercase: List[Any] = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
_lowercase: int = txa_decoder_norm
# Only for layer 0:
_lowercase: Any = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
_lowercase: Tuple = tax_decoder_rel_embedding
# Token Embeddings
_lowercase: str = tax_model["target"]["token_embedder"]["embedding"]
_lowercase: List[str] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowercase: Optional[Any] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(UpperCamelCase__ )
    print("T5X Model was successfully converted!" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 226 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase__ =logging.get_logger(__name__)
UpperCAmelCase__ ={
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowerCamelCase__ ( _a ):
a : Tuple = """gptj"""
a : Union[str, Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , A_ : Optional[int]=5_0_4_0_0 , A_ : Optional[Any]=2_0_4_8 , A_ : Optional[Any]=4_0_9_6 , A_ : Any=2_8 , A_ : Union[str, Any]=1_6 , A_ : int=6_4 , A_ : int=None , A_ : str="gelu_new" , A_ : str=0.0 , A_ : Optional[Any]=0.0 , A_ : Dict=0.0 , A_ : Dict=1e-5 , A_ : Optional[int]=0.02 , A_ : List[str]=True , A_ : List[Any]=5_0_2_5_6 , A_ : Optional[int]=5_0_2_5_6 , A_ : List[Any]=False , **A_ : int , ):
'''simple docstring'''
__lowercase = vocab_size
__lowercase = n_positions
__lowercase = n_embd
__lowercase = n_layer
__lowercase = n_head
__lowercase = n_inner
__lowercase = rotary_dim
__lowercase = activation_function
__lowercase = resid_pdrop
__lowercase = embd_pdrop
__lowercase = attn_pdrop
__lowercase = layer_norm_epsilon
__lowercase = initializer_range
__lowercase = use_cache
__lowercase = bos_token_id
__lowercase = eos_token_id
super().__init__(
bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ )
class lowerCamelCase__ ( _a ):
def __init__( self : int , A_ : PretrainedConfig , A_ : str = "default" , A_ : List[PatchingSpec] = None , A_ : bool = False , ):
'''simple docstring'''
super().__init__(A_ , task=A_ , patching_specs=A_ , use_past=A_ )
if not getattr(self._config , """pad_token_id""" , A_ ):
# TODO: how to do that better?
__lowercase = 0
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(A_ , direction="""inputs""" )
__lowercase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowercase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
return self._config.n_head
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , A_ : PreTrainedTokenizer , A_ : int = -1 , A_ : int = -1 , A_ : bool = False , A_ : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase = super(A_ , self ).generate_dummy_inputs(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
# We need to order the input in the way they appears in the forward()
__lowercase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowercase , __lowercase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowercase = [
(torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(self.num_layers )
]
__lowercase = common_inputs["""attention_mask"""]
if self.use_past:
__lowercase = ordered_inputs["""attention_mask"""].dtype
__lowercase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return 1_3
| 616 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
snake_case = """src/transformers"""
# Matches is_xxx_available()
snake_case = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
snake_case = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
snake_case = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
snake_case = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
snake_case = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
snake_case = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
snake_case = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
snake_case = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
snake_case = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
snake_case = re.compile(R"""^\s*try:""")
# Catches a line with else:
snake_case = re.compile(R"""^\s*else:""")
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple ) -> int:
if _re_test_backend.search(snake_case__ ) is None:
return None
_lowercase = [b[0] for b in _re_backend.findall(snake_case__ )]
backends.sort()
return "_and_".join(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Dict ) -> str:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowercase = f.readlines()
_lowercase = 0
while line_index < len(snake_case__ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case__ ):
return None
# First grab the objects without a specific backend in _import_structure
_lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
_lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case__ ):
_lowercase = _re_one_line_import_struct.search(snake_case__ ).groups()[0]
            _lowercase = re.findall(R'\[([^\]]+)\]' , snake_case__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
_lowercase = _re_import_struct_key_value.search(snake_case__ )
if single_line_import_search is not None:
_lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(snake_case__ ) > 0]
objects.extend(snake_case__ )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
_lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
_lowercase = lines[line_index]
if _re_import_struct_add_one.search(snake_case__ ) is not None:
objects.append(_re_import_struct_add_one.search(snake_case__ ).groups()[0] )
elif _re_import_struct_add_many.search(snake_case__ ) is not None:
_lowercase = _re_import_struct_add_many.search(snake_case__ ).groups()[0].split(', ' )
_lowercase = [obj[1:-1] for obj in imports if len(snake_case__ ) > 0]
objects.extend(snake_case__ )
elif _re_between_brackets.search(snake_case__ ) is not None:
_lowercase = _re_between_brackets.search(snake_case__ ).groups()[0].split(', ' )
_lowercase = [obj[1:-1] for obj in imports if len(snake_case__ ) > 0]
objects.extend(snake_case__ )
elif _re_quote_object.search(snake_case__ ) is not None:
objects.append(_re_quote_object.search(snake_case__ ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
_lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_lowercase = []
while (
line_index < len(snake_case__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
_lowercase = lines[line_index]
_lowercase = _re_import.search(snake_case__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case__ ):
# If the line is an if is_backend_available, we grab all objects associated.
_lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
_lowercase = lines[line_index]
_lowercase = _re_import.search(snake_case__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :Union[str, Any] ) -> Union[str, Any]:
def find_duplicates(snake_case__ :Optional[Any] ):
return [k for k, v in collections.Counter(snake_case__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_lowercase = []
for key in import_dict_objects.keys():
_lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
_lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_lowercase = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def SCREAMING_SNAKE_CASE__ ( ) -> str:
_lowercase = []
for root, _, files in os.walk(snake_case__ ):
if "__init__.py" in files:
_lowercase = os.path.join(snake_case__ , '__init__.py' )
_lowercase = parse_init(snake_case__ )
if objects is not None:
_lowercase = analyze_results(*snake_case__ )
if len(snake_case__ ) > 0:
_lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(snake_case__ ) )
if len(snake_case__ ) > 0:
raise ValueError('\n\n'.join(snake_case__ ) )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
_lowercase = []
for path, directories, files in os.walk(snake_case__ ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(snake_case__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(snake_case__ ) / folder).glob('*.py' ) ) ) == 0:
continue
_lowercase = str((Path(snake_case__ ) / folder).relative_to(snake_case__ ) )
_lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(snake_case__ )
for fname in files:
if fname == "__init__.py":
continue
_lowercase = str((Path(snake_case__ ) / fname).relative_to(snake_case__ ) )
_lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(snake_case__ )
return submodules
snake_case = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
# This is to make sure the transformers module imported is the one in the repo.
_lowercase = importlib.util.spec_from_file_location(
'transformers' , os.path.join(snake_case__ , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
_lowercase = spec.loader.load_module()
_lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(snake_case__ ) > 0:
_lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules() | 535 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = PegasusTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = PegasusTokenizerFast
SCREAMING_SNAKE_CASE_ : Any = True
SCREAMING_SNAKE_CASE_ : Optional[int] = True
def __UpperCAmelCase ( self : List[str] ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase = PegasusTokenizer(__A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def __UpperCAmelCase ( self : Any ,**__A : int ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**__A )
def __UpperCAmelCase ( self : Union[str, Any] ,__A : int ) -> List[str]:
return ("This is a test", "This is a test")
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
_lowercase = '</s>'
_lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) ,__A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
_lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<pad>' )
self.assertEqual(vocab_keys[1] ,'</s>' )
self.assertEqual(vocab_keys[-1] ,'v' )
self.assertEqual(len(__A ) ,1103 )
def __UpperCAmelCase ( self : Tuple ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size ,1103 )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
_lowercase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowercase = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
_lowercase = rust_tokenizer([raw_input_str] ,return_tensors=__A ,add_special_tokens=__A ).input_ids[0]
_lowercase = py_tokenizer([raw_input_str] ,return_tensors=__A ,add_special_tokens=__A ).input_ids[0]
self.assertListEqual(__A ,__A )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
_lowercase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_lowercase = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
_lowercase = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_lowercase = tokenizer([raw_input_str] ,return_tensors=__A ).input_ids[0]
self.assertListEqual(__A ,__A )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
_lowercase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_lowercase = 'To ensure a smooth flow of bank resolutions.'
_lowercase = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_lowercase = tokenizer([raw_input_str] ,return_tensors=__A ).input_ids[0]
self.assertListEqual(__A ,__A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
_lowercase = ['This is going to be way too long.' * 150, 'short example']
_lowercase = ['not super long but more than 5 tokens', 'tiny']
_lowercase = self._large_tokenizer(__A ,padding=__A ,truncation=__A ,return_tensors='pt' )
_lowercase = self._large_tokenizer(
text_target=__A ,max_length=5 ,padding=__A ,truncation=__A ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__A ) == 2 # input_ids, attention_mask.
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
# fmt: off
_lowercase = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A ,model_name='google/bigbird-pegasus-large-arxiv' ,revision='ba85d0851d708441f91440d509690f1ab6353415' ,)
@require_sentencepiece
@require_tokenizers
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = PegasusTokenizer
SCREAMING_SNAKE_CASE_ : int = PegasusTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : List[Any] = True
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase = PegasusTokenizer(__A ,offset=0 ,mask_token_sent=__A ,mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def __UpperCAmelCase ( self : Union[str, Any] ,**__A : Union[str, Any] ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**__A )
def __UpperCAmelCase ( self : Union[str, Any] ,__A : int ) -> Tuple:
return ("This is a test", "This is a test")
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
_lowercase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowercase = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
_lowercase = rust_tokenizer([raw_input_str] ,return_tensors=__A ,add_special_tokens=__A ).input_ids[0]
_lowercase = py_tokenizer([raw_input_str] ,return_tensors=__A ,add_special_tokens=__A ).input_ids[0]
self.assertListEqual(__A ,__A )
@require_torch
def __UpperCAmelCase ( self : List[str] ) -> Dict:
_lowercase = ['This is going to be way too long.' * 1000, 'short example']
_lowercase = ['not super long but more than 5 tokens', 'tiny']
_lowercase = self._large_tokenizer(__A ,padding=__A ,truncation=__A ,return_tensors='pt' )
_lowercase = self._large_tokenizer(
text_target=__A ,max_length=5 ,padding=__A ,truncation=__A ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__A ) == 2 # input_ids, attention_mask.
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
_lowercase = self._large_tokenizer(__A ).input_ids
self.assertListEqual(
__A ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,) | 535 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = filter(lambda lowercase : p.requires_grad , model.parameters() )
__lowercase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__a : str = logging.getLogger(__name__)
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
if metric == "rouge2":
__lowercase = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
__lowercase = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
__lowercase = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
__lowercase = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
            F"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
''' function.''' )
__lowercase = ModelCheckpoint(
dirpath=lowercase , filename=lowercase , monitor=F"val_{metric}" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
return EarlyStopping(
monitor=F"val_{metric}" , mode='''min''' if '''loss''' in metric else '''max''' , patience=lowercase , verbose=lowercase , )
class _UpperCamelCase ( pl.Callback ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
__lowercase = {F"lr_group_{i}": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCAmelCase__ )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> None:
'''simple docstring'''
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" )
__lowercase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
__lowercase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__lowercase = od / '''test_results.txt'''
__lowercase = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__lowercase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
__lowercase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=lowerCAmelCase__ )
generations_file.parent.mkdir(exist_ok=lowerCAmelCase__ )
with open(lowerCAmelCase__ , '''a+''' ) as writer:
for key in sorted(lowerCAmelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
__lowercase = metrics[key]
if isinstance(lowerCAmelCase__ , torch.Tensor ):
__lowercase = val.item()
__lowercase = F"{key}: {val:.6f}\n"
writer.write(lowerCAmelCase__ )
if not save_generations:
return
if "preds" in metrics:
__lowercase = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(lowerCAmelCase__ )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
try:
__lowercase = pl_module.model.model.num_parameters()
except AttributeError:
__lowercase = pl_module.model.num_parameters()
__lowercase = count_trainable_parameters(lowerCAmelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCAmelCase__ , lowerCAmelCase__ , '''test''' )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 534 | import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                ''' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCAmelCase__ , speech_processor=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ = "auto" ) -> Dict:
'''simple docstring'''
if slice_size == "auto":
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
self.enable_attention_slicing(lowerCAmelCase__ )
@torch.no_grad()
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__=1_60_00 , lowerCAmelCase__ = 5_12 , lowerCAmelCase__ = 5_12 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , **lowerCAmelCase__ , ) -> List[str]:
'''simple docstring'''
__lowercase = self.speech_processor.feature_extractor(
lowerCAmelCase__ , return_tensors='''pt''' , sampling_rate=lowerCAmelCase__ ).input_features.to(self.device )
__lowercase = self.speech_model.generate(lowerCAmelCase__ , max_length=48_00_00 )
__lowercase = self.speech_processor.tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , normalize=lowerCAmelCase__ )[
0
]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowercase = 1
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowercase = len(lowerCAmelCase__ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowerCAmelCase__ )}." )
# get prompt text embeddings
__lowercase = self.tokenizer(
lowerCAmelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__lowercase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowercase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowercase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowercase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__lowercase , __lowercase , __lowercase = text_embeddings.shape
__lowercase = text_embeddings.repeat(1 , lowerCAmelCase__ , 1 )
__lowercase = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase__ , -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = 42
if negative_prompt is None:
__lowercase = [''''''] * batch_size
elif type(lowerCAmelCase__ ) is not type(lowerCAmelCase__ ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase__ )} !="
F" {type(lowerCAmelCase__ )}." )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowercase = [negative_prompt]
elif batch_size != len(lowerCAmelCase__ ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase__ )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
__lowercase = negative_prompt
__lowercase = text_input_ids.shape[-1]
__lowercase = self.tokenizer(
lowerCAmelCase__ , padding='''max_length''' , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='''pt''' , )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowercase = uncond_embeddings.shape[1]
__lowercase = uncond_embeddings.repeat(1 , lowerCAmelCase__ , 1 )
__lowercase = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__lowercase = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device='''cpu''' , dtype=lowerCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
__lowercase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__lowercase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
__lowercase , __lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = 1 / 0.1_8215 * latents
__lowercase = self.vae.decode(lowerCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCAmelCase__ , nsfw_content_detected=lowerCAmelCase__ ) | 534 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A : Any = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 75 |
# Imports
import numpy as np
class _SCREAMING_SNAKE_CASE :
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Any:
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]:
if red is not None:
lowerCamelCase_ =red
if green is not None:
lowerCamelCase_ =green
if blue is not None:
lowerCamelCase_ =blue
if red_edge is not None:
lowerCamelCase_ =red_edge
if nir is not None:
lowerCamelCase_ =nir
return True
def _snake_case ( self , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]:
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ={
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _snake_case ( self )-> Optional[Any]:
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case ( self )-> Tuple:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case ( self )-> str:
return self.nir * (self.red / (self.green**2))
def _snake_case ( self )-> Optional[int]:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case ( self )-> Tuple:
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case ( self )-> Dict:
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case ( self )-> List[Any]:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case ( self )-> Tuple:
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case ( self )-> Optional[int]:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case ( self )-> List[str]:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case ( self )-> List[str]:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case ( self )-> Optional[int]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.0_8 , _SCREAMING_SNAKE_CASE=1.2_2 , _SCREAMING_SNAKE_CASE=0.0_3 )-> Any:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case ( self )-> Tuple:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case ( self )-> Any:
return (self.nir / self.green) - 1
def _snake_case ( self )-> Union[str, Any]:
return (self.nir / self.redEdge) - 1
def _snake_case ( self )-> Union[str, Any]:
return (self.red - self.blue) / self.red
def _snake_case ( self )-> Dict:
lowerCamelCase_ =self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case ( self )-> int:
return self.nir - self.green
def _snake_case ( self )-> Dict:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =(2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.1_6 )-> List[Any]:
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.5 )-> Dict:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case ( self )-> int:
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]:
return (self.nir - b) / (a * self.red)
def _snake_case ( self )-> int:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case ( self )-> Optional[Any]:
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case ( self )-> List[str]:
return self.nir / self.red
def _snake_case ( self )-> List[str]:
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case ( self )-> str:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case ( self )-> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _snake_case ( self )-> Dict:
return self.nir / (self.nir + self.red + self.green)
def _snake_case ( self )-> List[str]:
return self.red / (self.nir + self.red + self.green)
def _snake_case ( self )-> int:
return (self.green - self.red) / (self.green + self.red)
def _snake_case ( self )-> str:
return (self.red - self.green) / (self.red + self.green)
def _snake_case ( self )-> str:
lowerCamelCase_ =np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCamelCase_ =np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case ( self )-> List[str]:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case ( self )-> List[Any]:
return self.nir / self.red
def _snake_case ( self )-> Optional[int]:
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case ( self )-> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 75 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 477 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
a__ = None
a__ = logging.get_logger(__name__)
a__ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
a__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
a__ = {
"""google/rembert""": 2_56,
}
a__ = """▁"""
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = VOCAB_FILES_NAMES
snake_case_ : Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ : str = RemBertTokenizer
def __init__( self : List[str] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Tuple=False , lowerCAmelCase : Union[str, Any]="[CLS]" , lowerCAmelCase : List[Any]="[SEP]" , lowerCAmelCase : List[str]="<unk>" , lowerCAmelCase : int="[SEP]" , lowerCAmelCase : Optional[Any]="<pad>" , lowerCAmelCase : Union[str, Any]="[CLS]" , lowerCAmelCase : Optional[Any]="[MASK]" , **lowerCAmelCase : Optional[int] , ) -> Tuple:
"""simple docstring"""
_snake_case : Optional[Any] = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else mask_token
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , remove_space=lowerCAmelCase , keep_accents=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , **lowerCAmelCase , )
_snake_case : List[str] = do_lower_case
_snake_case : Optional[int] = remove_space
_snake_case : List[Any] = keep_accents
_snake_case : Optional[Any] = vocab_file
_snake_case : Any = False if not self.vocab_file else True
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_snake_case : Any = [self.sep_token_id]
_snake_case : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""")
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase)) + [1] + ([0] * len(lowerCAmelCase)) + [1]
return [1] + ([0] * len(lowerCAmelCase)) + [1]
def UpperCamelCase_ ( self : int , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_snake_case : List[str] = [self.sep_token_id]
_snake_case : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase_ ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error("""Vocabulary path ({}) should be a directory""".format(lowerCAmelCase))
return
_snake_case : Optional[Any] = os.path.join(
lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase):
copyfile(self.vocab_file , lowerCAmelCase)
return (out_vocab_file,)
| 477 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=7 ):
SCREAMING_SNAKE_CASE = None
if token is not None:
SCREAMING_SNAKE_CASE = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
# The id of a workflow (not of a workflow run)
SCREAMING_SNAKE_CASE = "636036"
SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
SCREAMING_SNAKE_CASE = requests.get(UpperCAmelCase__ , headers=UpperCAmelCase__ ).json()
return result["workflow_runs"]
def __lowerCamelCase (UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = get_daily_ci_runs(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
SCREAMING_SNAKE_CASE = workflow_run["id"]
break
return workflow_run_id
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any ):
SCREAMING_SNAKE_CASE = get_last_daily_ci_runs(UpperCAmelCase__ )
if workflow_run_id is not None:
SCREAMING_SNAKE_CASE = get_artifacts_links(worflow_run_id=UpperCAmelCase__ , token=UpperCAmelCase__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
SCREAMING_SNAKE_CASE = artifacts_links[artifact_name]
download_artifact(
artifact_name=UpperCAmelCase__ , artifact_url=UpperCAmelCase__ , output_dir=UpperCAmelCase__ , token=UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict ):
get_last_daily_ci_artifacts(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = {}
for artifact_name in artifact_names:
SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , F"{artifact_name}.zip" )
if os.path.isfile(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = {}
with zipfile.ZipFile(UpperCAmelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCAmelCase__ ):
# read the file
with z.open(UpperCAmelCase__ ) as f:
SCREAMING_SNAKE_CASE = f.read().decode("UTF-8" )
return results
| 647 | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_lowerCamelCase : List[Any] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 647 | 1 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase__ : Tuple = getLogger(__name__)
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : int = 8 , __UpperCAmelCase : int = 10_24 , __UpperCAmelCase : Tuple="val" , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Any=False , __UpperCAmelCase : List[Any]="summarization" , __UpperCAmelCase : Any=None , __UpperCAmelCase : Any=1 , __UpperCAmelCase : Dict = None , __UpperCAmelCase : Any="" , **__UpperCAmelCase : List[Any] , ) -> Dict:
SCREAMING_SNAKE_CASE_ = str(__UpperCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl' , rank=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = Path(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = save_dir.joinpath(f"rank_{local_rank}_output.json" )
torch.cuda.set_device(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ).cuda()
if fpaa:
SCREAMING_SNAKE_CASE_ = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__UpperCAmelCase , __UpperCAmelCase ) # update config with task specific params
SCREAMING_SNAKE_CASE_ = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
SCREAMING_SNAKE_CASE_ = num_return_sequences
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
SCREAMING_SNAKE_CASE_ = tokenizer.model_max_length
if prefix is None:
SCREAMING_SNAKE_CASE_ = prefix or getattr(model.config , 'prefix' , '' ) or ''
SCREAMING_SNAKE_CASE_ = SeqaSeqDataset(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , max_target_length=10_24 , type_path=__UpperCAmelCase , n_obs=__UpperCAmelCase , prefix=__UpperCAmelCase , **__UpperCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
SCREAMING_SNAKE_CASE_ = ds.make_sortish_sampler(__UpperCAmelCase , distributed=__UpperCAmelCase , add_extra_examples=__UpperCAmelCase , shuffle=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = DataLoader(__UpperCAmelCase , sampler=__UpperCAmelCase , batch_size=__UpperCAmelCase , collate_fn=ds.collate_fn )
SCREAMING_SNAKE_CASE_ = []
for batch in tqdm(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = model.generate(
input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=__UpperCAmelCase , num_beams=__UpperCAmelCase , **__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = batch['ids']
if num_return_sequences > 1:
SCREAMING_SNAKE_CASE_ = chunks(__UpperCAmelCase , __UpperCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__UpperCAmelCase ):
results.append({'pred': pred, 'id': ids[i].item()} )
save_json(__UpperCAmelCase , __UpperCAmelCase )
return results, sampler.num_replicas
def UpperCAmelCase_ ( ) -> Dict:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
parser.add_argument('--data_dir' , type=__UpperCAmelCase , help='like cnn_dm/test.source' )
parser.add_argument(
        '--model_name' , type=__UpperCAmelCase , help='like facebook/bart-large-cnn, t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
parser.add_argument('--save_dir' , type=__UpperCAmelCase , help='where to save' , default='tmp_gen' )
parser.add_argument('--max_source_length' , type=__UpperCAmelCase , default=__UpperCAmelCase )
parser.add_argument(
        '--type_path' , type=__UpperCAmelCase , default='test' , help='which subset to evaluate, typically train/val/test' )
parser.add_argument('--task' , type=__UpperCAmelCase , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=__UpperCAmelCase , default=8 , required=__UpperCAmelCase , help='batch size' )
parser.add_argument(
'--local_rank' , type=__UpperCAmelCase , default=-1 , required=__UpperCAmelCase , help='should be passed by distributed.launch' )
parser.add_argument(
'--n_obs' , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase , help='How many observations. Defaults to all.' )
parser.add_argument(
'--num_return_sequences' , type=__UpperCAmelCase , default=1 , required=__UpperCAmelCase , help='How many sequences to return' )
parser.add_argument(
        '--sync_timeout' , type=__UpperCAmelCase , default=6_00 , required=__UpperCAmelCase , help='How long the master process should wait for other processes to finish.' , )
parser.add_argument('--src_lang' , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase )
parser.add_argument('--tgt_lang' , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase )
parser.add_argument(
        '--prefix' , type=__UpperCAmelCase , required=__UpperCAmelCase , default=__UpperCAmelCase , help='will be added to the beginning of src examples' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--debug' , action='store_true' )
SCREAMING_SNAKE_CASE_ = time.time()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = parser.parse_known_args()
SCREAMING_SNAKE_CASE_ = parse_numeric_n_bool_cl_kwargs(__UpperCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(f"parsed the following generate kwargs: {generate_kwargs}" )
SCREAMING_SNAKE_CASE_ = Path(args.save_dir + '_tmp' )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) # this handles locking.
SCREAMING_SNAKE_CASE_ = list(json_save_dir.glob('rank_*.json' ) )
if intermediate_files:
raise ValueError(f"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
SCREAMING_SNAKE_CASE_ = {}
if args.src_lang is not None:
SCREAMING_SNAKE_CASE_ = args.src_lang
if args.tgt_lang is not None:
SCREAMING_SNAKE_CASE_ = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = eval_data_dir(
args.data_dir , __UpperCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__UpperCAmelCase , **__UpperCAmelCase , )
if args.local_rank <= 0:
SCREAMING_SNAKE_CASE_ = Path(args.save_dir )
save_dir.mkdir(exist_ok=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = gather_results_from_each_node(__UpperCAmelCase , __UpperCAmelCase , args.sync_timeout )
SCREAMING_SNAKE_CASE_ = combine_partial_results(__UpperCAmelCase )
if args.num_return_sequences > 1:
SCREAMING_SNAKE_CASE_ = save_dir.joinpath('pseudolabel_results.json' )
print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(__UpperCAmelCase , __UpperCAmelCase )
return
SCREAMING_SNAKE_CASE_ = Path(args.data_dir ).joinpath(args.type_path + '.target' )
with open(__UpperCAmelCase ) as f:
SCREAMING_SNAKE_CASE_ = [x.rstrip() for x in f.readlines()][: len(__UpperCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
SCREAMING_SNAKE_CASE_ = 'translation' in args.task
SCREAMING_SNAKE_CASE_ = calculate_bleu if calc_bleu else calculate_rouge
SCREAMING_SNAKE_CASE_ = 'bleu' if calc_bleu else 'rouge'
SCREAMING_SNAKE_CASE_ = score_fn(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = time.time() - start_time
SCREAMING_SNAKE_CASE_ = round(runtime / metrics['n_obs'] , 4 )
SCREAMING_SNAKE_CASE_ = num_replicas
# TODO(@stas00): add whatever metadata to metrics
SCREAMING_SNAKE_CASE_ = save_dir.joinpath(f"{args.type_path}_{metric_name}.json" )
save_json(__UpperCAmelCase , __UpperCAmelCase , indent=__UpperCAmelCase )
print(__UpperCAmelCase )
write_txt_file(__UpperCAmelCase , save_dir.joinpath(f"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(__UpperCAmelCase , save_dir.joinpath(f"{args.type_path}.target" ) )
else:
shutil.rmtree(__UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> List:
SCREAMING_SNAKE_CASE_ = []
for partial_result in partial_results:
records.extend(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["id"] )
SCREAMING_SNAKE_CASE_ = [x['pred'] for x in records]
return preds
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] ) -> List[Dict[str, List]]:
# WAIT FOR lots of .json files
SCREAMING_SNAKE_CASE_ = time.time()
logger.info('waiting for all nodes to finish' )
SCREAMING_SNAKE_CASE_ = None
while (time.time() - start_wait) < timeout:
SCREAMING_SNAKE_CASE_ = list(save_dir.glob('rank_*.json' ) )
if len(__UpperCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
SCREAMING_SNAKE_CASE_ = lmap(__UpperCAmelCase , __UpperCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
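    # A hypothetical invocation (script name, model and paths are placeholders; the flags match
    # the argparse definitions above):
    #   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
    #     --model_name Helsinki-NLP/opus-mt-en-ro --data_dir wmt_en_ro --save_dir tmp_gen \
    #     --bs 16 --task translation --src_lang en --tgt_lang ro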
run_generate() | 31 |
def __a ( SCREAMING_SNAKE_CASE ) -> list:
'''simple docstring'''
__UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
if n_element < 1:
        __UpperCAmelCase = ValueError('''n_element should be a positive number''' )
raise my_error
__UpperCAmelCase = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = (0, 0, 0)
__UpperCAmelCase = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
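

# A compact, self-contained sketch of the same 2/3/5 three-pointer idea (the helper name and
# the sanity check are illustrative additions, not part of the original function above).
def _hamming_sketch(n_terms):
    hamming = [1]
    i = j = k = 0
    while len(hamming) < n_terms:
        nxt = min(2 * hamming[i], 3 * hamming[j], 5 * hamming[k])
        hamming.append(nxt)
        if nxt == 2 * hamming[i]:
            i += 1
        if nxt == 3 * hamming[j]:
            j += 1
        if nxt == 5 * hamming[k]:
            k += 1
    return hamming


assert _hamming_sketch(9) == [1, 2, 3, 4, 5, 6, 8, 9, 10]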
if __name__ == "__main__":
A_ : List[str] = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
A_ : Union[str, Any] = hamming(int(n))
print('-----------------------------------------------------')
print(F"""The list with nth numbers is: {hamming_numbers}""")
print('-----------------------------------------------------')
| 303 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """dandelin/vilt-b32-finetuned-vqa"""
_lowerCAmelCase = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
_lowerCAmelCase = """image_qa"""
_lowerCAmelCase = AutoProcessor
_lowerCAmelCase = AutoModelForVisualQuestionAnswering
_lowerCAmelCase = ["""image""", """text"""]
_lowerCAmelCase = ["""text"""]
def __init__( self , *__magic_name__ , **__magic_name__ ) -> Tuple:
requires_backends(self , ['vision'] )
super().__init__(*__magic_name__ , **__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Tuple:
return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='pt' )
def __UpperCAmelCase ( self , __magic_name__ ) -> Any:
with torch.no_grad():
return self.model(**__magic_name__ ).logits
def __UpperCAmelCase ( self , __magic_name__ ) -> Optional[int]:
_a = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
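
    # A hypothetical usage sketch (object names and the image path are illustrative; the tool is
    # instantiated and called like any other PipelineTool):
    #   from PIL import Image
    #   tool = <this tool class>()
    #   answer = tool(image=Image.open("photo.png"), question="What colour is the car?")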
| 532 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
a_ : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=_SCREAMING_SNAKE_CASE )
class a :
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
@dataclass(frozen=_SCREAMING_SNAKE_CASE )
class a :
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = 42
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = False , ) -> Optional[int]:
_a = hans_processors[task]()
_a = os.path.join(
__magic_name__ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(__magic_name__ ) , __magic_name__ , ) , )
_a = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_a , _a = label_list[2], label_list[1]
_a = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a = cached_features_file + '.lock'
with FileLock(__magic_name__ ):
if os.path.exists(__magic_name__ ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
_a = torch.load(__magic_name__ )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
_a = (
processor.get_dev_examples(__magic_name__ ) if evaluate else processor.get_train_examples(__magic_name__ )
)
logger.info('Training examples: %s' , len(__magic_name__ ) )
_a = hans_convert_examples_to_features(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
logger.info('Saving features into cached file %s' , __magic_name__ )
torch.save(self.features , __magic_name__ )
def __len__( self ) -> List[Any]:
return len(self.features )
def __getitem__( self , __magic_name__ ) -> InputFeatures:
return self.features[i]
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class a :
_lowerCAmelCase = 42
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 1_28 , __magic_name__=False , __magic_name__ = False , ) -> str:
_a = hans_processors[task]()
_a = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_a , _a = label_list[2], label_list[1]
_a = label_list
_a = processor.get_dev_examples(__magic_name__ ) if evaluate else processor.get_train_examples(__magic_name__ )
_a = hans_convert_examples_to_features(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(__magic_name__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
_a = tf.data.Dataset.from_generator(
__magic_name__ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def __UpperCAmelCase ( self ) -> Tuple:
return self.dataset
def __len__( self ) -> Optional[Any]:
return len(self.features )
def __getitem__( self , __magic_name__ ) -> InputFeatures:
return self.features[i]
def __UpperCAmelCase ( self ) -> List[Any]:
return self.label_list
class a ( _SCREAMING_SNAKE_CASE ):
def __UpperCAmelCase ( self , __magic_name__ ) -> Tuple:
return self._create_examples(self._read_tsv(os.path.join(__magic_name__ , 'heuristics_train_set.txt' ) ) , 'train' )
def __UpperCAmelCase ( self , __magic_name__ ) -> List[Any]:
return self._create_examples(self._read_tsv(os.path.join(__magic_name__ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def __UpperCAmelCase ( self ) -> Tuple:
return ["contradiction", "entailment", "neutral"]
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Optional[Any]:
_a = []
for i, line in enumerate(__magic_name__ ):
if i == 0:
continue
_a = '%s-%s' % (set_type, line[0])
_a = line[5]
_a = line[6]
_a = line[7][2:] if line[7].startswith('ex' ) else line[7]
_a = line[0]
examples.append(InputExample(guid=__magic_name__ , text_a=__magic_name__ , text_b=__magic_name__ , label=__magic_name__ , pairID=__magic_name__ ) )
return examples
def _A (lowerCAmelCase__ :List[InputExample] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :PreTrainedTokenizer , ) -> Tuple:
'''simple docstring'''
_a = {label: i for i, label in enumerate(lowerCAmelCase__ )}
_a = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCAmelCase__ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
_a = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='max_length' , truncation=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , )
_a = label_map[example.label] if example.label in label_map else 0
_a = int(example.pairID )
features.append(InputFeatures(**lowerCAmelCase__ , label=lowerCAmelCase__ , pairID=lowerCAmelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
a_ : Optional[int] = {
"hans": 3,
}
a_ : Optional[Any] = {
"hans": HansProcessor,
}
| 532 | 1 |
from __future__ import annotations
__UpperCamelCase : Optional[Any] = list[list[int]]
# assigning initial values to the grid
__UpperCamelCase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCamelCase : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
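

# Worked example of the 3x3-box arithmetic used above (numbers are illustrative): for row=5,
# column=7 the box origin is (5 - 5 % 3, 7 - 7 % 3) == (3, 6), so the inner loops scan rows
# 3-5 and columns 6-8 of that sub-grid.
assert (5 - 5 % 3, 7 - 7 % 3) == (3, 6)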
def snake_case ( lowerCamelCase ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if location := find_empty_location(lowerCamelCase ):
__lowercase , __lowercase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__lowercase = digit
if sudoku(lowerCamelCase ) is not None:
return grid
__lowercase = 0
return None
def snake_case ( lowerCamelCase ):
'''simple docstring'''
for row in grid:
for cell in row:
print(lowerCamelCase , end=""" """ )
print()
if __name__ == "__main__":
    # each example grid below is solved in place (sudoku() mutates the grid it is given)
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
__UpperCamelCase : Optional[Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 80 |
def _lowerCamelCase ( lowerCamelCase_: int ):
'''simple docstring'''
A : Any = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCamelCase ( lowerCamelCase_: int = 100 ):
'''simple docstring'''
A : Dict = 1
A : Union[str, Any] = 2
for i in range(2 , max_n + 1 ):
A : List[Any] = pre_numerator
A : Tuple = 2 * i // 3 if i % 3 == 0 else 1
A : str = cur_numerator
A : str = e_cont * pre_numerator + temp
return sum_digits(lowerCamelCase_ )
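

# The e_cont expression above reproduces e's continued-fraction pattern [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...]:
# every third partial quotient (i divisible by 3) equals 2*i/3 and the rest are 1 (illustrative check).
assert [2 * i // 3 if i % 3 == 0 else 1 for i in range(2, 11)] == [1, 2, 1, 1, 4, 1, 1, 6, 1]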
if __name__ == "__main__":
print(F'''{solution() = }''') | 256 | 0 |
lowerCAmelCase_ = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCamelCase__ = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
UpperCAmelCase__ : List[Any] = self.transformer_dir
shutil.copy(
os.path.join(__snake_case , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def lowercase_ ( self : List[str] , _A : Optional[Any] , _A : int , _A : List[Any] , _A : str=None ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
UpperCAmelCase__ : List[Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
UpperCAmelCase__ : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
UpperCAmelCase__ : Optional[Any] = black.format_str(__snake_case , mode=__snake_case )
UpperCAmelCase__ : Any = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(__snake_case , '''w''' , newline='''\n''' ) as f:
f.write(__snake_case )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__snake_case ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__snake_case )
with open(__snake_case , '''r''' ) as f:
self.assertTrue(f.read() , __snake_case )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(__snake_case , __snake_case )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __snake_case , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , __snake_case ) , )
# Copy consistency with a really long name
UpperCAmelCase__ : Any = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub('''Bert''' , __snake_case , __snake_case ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __snake_case , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __snake_case ) , )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
UpperCAmelCase__ : Optional[Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
UpperCAmelCase__ : Optional[int] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
UpperCAmelCase__ : int = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = check_copies.convert_to_localized_md(
__snake_case , __snake_case , localized_readme['''format_model_list'''] )
self.assertFalse(__snake_case )
self.assertEqual(__snake_case , __snake_case )
UpperCAmelCase__ , UpperCAmelCase__ : Any = check_copies.convert_to_localized_md(
__snake_case , __snake_case , localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__snake_case )
UpperCAmelCase__ : str = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
UpperCAmelCase__ : List[Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
UpperCAmelCase__ : Any = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = check_copies.convert_to_localized_md(
__snake_case , __snake_case , localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(__snake_case , __snake_case )
| 75 |
def UpperCAmelCase_ ( _UpperCAmelCase :list ) -> list:
'''simple docstring'''
if len(_UpperCAmelCase ) < 2:
return collection
def circle_sort_util(_UpperCAmelCase :list , _UpperCAmelCase :int , _UpperCAmelCase :int ) -> bool:
A_ = False
if low == high:
return swapped
A_ = low
A_ = high
while left < right:
if collection[left] > collection[right]:
A_ , A_ = (
collection[right],
collection[left],
)
A_ = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
A_ , A_ = (
collection[right + 1],
collection[left],
)
A_ = True
A_ = low + int((high - low) / 2 )
A_ = circle_sort_util(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
A_ = circle_sort_util(_UpperCAmelCase , mid + 1 , _UpperCAmelCase )
return swapped or left_swap or right_swap
A_ = True
while is_not_sorted is True:
A_ = circle_sort_util(_UpperCAmelCase , 0 , len(_UpperCAmelCase ) - 1 )
return collection
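

# A minimal, self-contained sketch of a single "circle pass" (compare opposite ends and move
# inward), the primitive the recursive routine above is built on; the helper name and the tiny
# demo are illustrative additions.
def _circle_pass_sketch(data, low, high):
    swapped = False
    left, right = low, high
    while left < right:
        if data[left] > data[right]:
            data[left], data[right] = data[right], data[left]
            swapped = True
        left += 1
        right -= 1
    return swapped


_sample = [3, 1, 2]
_circle_pass_sketch(_sample, 0, len(_sample) - 1)
assert _sample == [2, 1, 3]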
if __name__ == "__main__":
a__ : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
a__ : List[str] = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
| 188 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : List[Any] = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class __snake_case ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = AlbertTokenizer
SCREAMING_SNAKE_CASE__ = AlbertTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = AlbertTokenizer(a_ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = 'this is a test'
lowerCAmelCase__ = 'this is a test'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = '<pad>'
lowerCAmelCase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) ,a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) ,a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<pad>' )
self.assertEqual(vocab_keys[1] ,'<unk>' )
self.assertEqual(vocab_keys[-1] ,'▁eloquent' )
self.assertEqual(len(a_ ) ,3_0000 )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size ,3_0000 )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = 'I was born in 92000, and this is falsé.'
lowerCAmelCase__ = tokenizer.tokenize(a_ )
lowerCAmelCase__ = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ ,a_ )
lowerCAmelCase__ = tokenizer.encode(a_ ,add_special_tokens=a_ )
lowerCAmelCase__ = rust_tokenizer.encode(a_ ,add_special_tokens=a_ )
self.assertListEqual(a_ ,a_ )
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = tokenizer.encode(a_ )
lowerCAmelCase__ = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ ,a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = AlbertTokenizer(a_ ,keep_accents=a_ )
lowerCAmelCase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(a_ ,['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) ,[48, 25, 21, 1289] )
lowerCAmelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
a_ ,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(a_ ,[31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ ,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] ,)
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = AlbertTokenizer(a_ )
lowerCAmelCase__ = tokenizer.encode('sequence builders' )
lowerCAmelCase__ = tokenizer.encode('multi-sequence build' )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(a_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(a_ ,a_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# fmt: off
lowerCAmelCase__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ ,model_name='albert-base-v2' ,revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' ,)
| 604 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class __snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ], # cumulative prob of 5 highest values <= 0.6
] ,dtype=tf.floataa ,)
lowerCAmelCase__ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] ,dtype=tf.intaa ,) # expected non filtered idx as noted above
lowerCAmelCase__ = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] ,dtype=tf.floataa ,) # expected non filtered values as noted above
lowerCAmelCase__ = tf_top_k_top_p_filtering(a_ ,top_k=10 ,top_p=0.6 ,min_tokens_to_keep=4 )
lowerCAmelCase__ = output[output != -float('inf' )]
lowerCAmelCase__ = tf.cast(
tf.where(tf.not_equal(a_ ,tf.constant(-float('inf' ) ,dtype=tf.floataa ) ) ) ,dtype=tf.intaa ,)
tf.debugging.assert_near(a_ ,a_ ,rtol=1e-1_2 )
tf.debugging.assert_equal(a_ ,a_ )
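

# A minimal, pure-Python sketch of the top-k idea exercised above: keep the k largest logits
# and push everything else to -inf (helper name and numbers are illustrative, not part of the
# TF implementation under test).
def _top_k_sketch(logits, k):
    keep = sorted(logits, reverse=True)[:k]
    threshold = min(keep)
    return [x if x >= threshold else float("-inf") for x in logits]


assert _top_k_sketch([0.1, 2.0, 1.0, -3.0], 2) == [float("-inf"), 2.0, 1.0, float("-inf")]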
@require_tf
class __snake_case ( unittest.TestCase , SCREAMING_SNAKE_CASE ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
SCREAMING_SNAKE_CASE__ = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# TF-only test: tf.saved_model export
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
class __snake_case ( tf.Module ):
def __init__( self ,a_ ):
"""simple docstring"""
super(a_ ,self ).__init__()
lowerCAmelCase__ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) ,tf.intaa ,name='input_ids' ),
tf.TensorSpec((None, input_length) ,tf.intaa ,name='attention_mask' ),
) ,jit_compile=a_ ,)
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.model.generate(
input_ids=a_ ,attention_mask=a_ ,max_new_tokens=a_ ,return_dict_in_generate=a_ ,)
return {"sequences": outputs["sequences"]}
lowerCAmelCase__ = [[2, 0], [102, 103]]
lowerCAmelCase__ = [[1, 0], [1, 1]]
lowerCAmelCase__ = DummyModel(model=a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a_ ,a_ ,signatures={'serving_default': dummy_model.serving} )
lowerCAmelCase__ = tf.saved_model.load(a_ ).signatures['serving_default']
for batch_size in range(1 ,len(a_ ) + 1 ):
lowerCAmelCase__ = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCAmelCase__ = serving_func(**a_ )['sequences']
lowerCAmelCase__ = test_model.generate(**a_ ,max_new_tokens=a_ )
tf.debugging.assert_equal(a_ ,a_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# TF-only test: tf.saved_model export
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
class __snake_case ( tf.Module ):
def __init__( self ,a_ ):
"""simple docstring"""
super(a_ ,self ).__init__()
lowerCAmelCase__ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) ,tf.intaa ,name='input_ids' ),
tf.TensorSpec((batch_size, None) ,tf.intaa ,name='attention_mask' ),
) ,jit_compile=a_ ,)
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.model.generate(
input_ids=a_ ,attention_mask=a_ ,max_new_tokens=a_ ,return_dict_in_generate=a_ ,)
return {"sequences": outputs["sequences"]}
lowerCAmelCase__ = [[2], [102, 103]]
lowerCAmelCase__ = [[1], [1, 1]]
lowerCAmelCase__ = DummyModel(model=a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a_ ,a_ ,signatures={'serving_default': dummy_model.serving} )
lowerCAmelCase__ = tf.saved_model.load(a_ ).signatures['serving_default']
for input_row in range(len(a_ ) ):
lowerCAmelCase__ = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCAmelCase__ = serving_func(**a_ )['sequences']
lowerCAmelCase__ = test_model.generate(**a_ ,max_new_tokens=a_ )
tf.debugging.assert_equal(a_ ,a_ )
@slow
@require_tensorflow_text
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' ,filename='spiece.model' ,local_dir=a_ )
class __snake_case ( tf.keras.layers.Layer ):
def __init__( self ):
"""simple docstring"""
super().__init__()
lowerCAmelCase__ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(a_ ,'spiece.model' ) ,'rb' ).read() )
lowerCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,*a_ ,**a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer.tokenize(a_ )
lowerCAmelCase__ , lowerCAmelCase__ = text.pad_model_inputs(
a_ ,max_seq_length=64 ,pad_value=self.model.config.pad_token_id )
lowerCAmelCase__ = self.model.generate(input_ids=a_ ,attention_mask=a_ )
return self.tokenizer.detokenize(a_ )
lowerCAmelCase__ = CompleteSentenceTransformer()
lowerCAmelCase__ = tf.keras.layers.Input(shape=(1,) ,dtype=tf.string ,name='inputs' )
lowerCAmelCase__ = complete_model(a_ )
lowerCAmelCase__ = tf.keras.Model(a_ ,a_ )
keras_model.save(a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# Has PT equivalent: this test relies on random sampling
lowerCAmelCase__ = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
lowerCAmelCase__ = 14
lowerCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 'Hello, my dog is cute and'
lowerCAmelCase__ = tokenizer(a_ ,return_tensors='tf' )
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
lowerCAmelCase__ = model.generate(**a_ ,eos_token_id=a_ ,**a_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCAmelCase__ = [638, 198]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
lowerCAmelCase__ = model.generate(**a_ ,eos_token_id=a_ ,**a_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# Has PT equivalent: ample use of framework-specific code
lowerCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = 'Hugging Face is a technology company based in New York and Paris.'
lowerCAmelCase__ = bart_tokenizer(a_ ,return_tensors='tf' ).input_ids
lowerCAmelCase__ = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = bart_model.generate(a_ ).numpy()
class __snake_case ( SCREAMING_SNAKE_CASE ):
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=None ,**a_ ):
"""simple docstring"""
return super().call(a_ ,**a_ )
lowerCAmelCase__ = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = bart_model.generate(a_ ,foo='bar' ).numpy()
self.assertTrue(np.array_equal(a_ ,a_ ) )
class __snake_case ( bart_model.model.encoder.__class__ ):
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,**a_ ):
"""simple docstring"""
return super().call(a_ ,**a_ )
lowerCAmelCase__ = FakeEncoder(bart_model.config ,bart_model.model.shared )
lowerCAmelCase__ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCAmelCase__ = bart_model.generate(a_ ).numpy()
with self.assertRaises(a_ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(a_ ,foo='bar' )
| 604 | 1 |
"""simple docstring"""
def molarity_to_normality(nfactor: float, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor, with molarity = moles / volume."""
    return round(float(moles / volume) * nfactor)

def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law PV = nRT solved for P, with R ~ 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))

def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law PV = nRT solved for V."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))

def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law PV = nRT solved for T."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
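# Illustrative usage (added sketch; the numeric inputs below are made-up examples):
# with R ~ 0.0821 L*atm/(mol*K), the rounded outputs of the helpers above can be
# checked directly against the ideal gas law PV = nRT.
if __name__ == "__main__":
    assert molarity_to_normality(nfactor=2, moles=4.0, volume=2.0) == 4
    assert moles_to_pressure(volume=0.82, moles=3.0, temperature=300.0) == 90
    assert moles_to_volume(pressure=0.82, moles=3.0, temperature=300.0) == 90
    assert pressure_and_volume_to_temperature(pressure=0.82, moles=0.1, volume=3.0) == 300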
| 196 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ = torch.nn.Linear(1_0 , 1_0 )
SCREAMING_SNAKE_CASE__ = torch.optim.SGD(model.parameters() , 0.1 )
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__ = accelerator.prepare(__UpperCAmelCase )
try:
pickle.loads(pickle.dumps(__UpperCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 196 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a : List[str] = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Dict = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Any = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Tuple = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Union[str, Any] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
 | 706 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a : Any = logging.get_logger(__name__)
__a : Dict = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Optional[Any] = '''yolos'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : int = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self ) -> float:
'''simple docstring'''
return 1E-4
@property
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
        return 12
 | 522 | 0 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
UpperCamelCase_ : List[str] = '''src/transformers'''
UpperCamelCase_ : Optional[Any] = '''docs/source/en/tasks'''
def __a ( _UpperCamelCase: Optional[Any] , _UpperCamelCase: Union[str, Any] , _UpperCamelCase: Any ) -> int:
"""simple docstring"""
with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_snake_case = f.readlines()
# Find the start prompt.
_snake_case = 0
while not lines[start_index].startswith(_UpperCamelCase ):
start_index += 1
start_index += 1
_snake_case = start_index
while not lines[end_index].startswith(_UpperCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase_ : str = direct_transformers_import(TRANSFORMERS_PATH)
UpperCamelCase_ : Optional[int] = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
UpperCamelCase_ : Any = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def __a ( _UpperCamelCase: str ) -> Dict:
"""simple docstring"""
_snake_case = TASK_GUIDE_TO_MODELS[task_guide]
_snake_case = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_UpperCamelCase , set() )
_snake_case = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def __a ( _UpperCamelCase: Tuple , _UpperCamelCase: Optional[Any]=False ) -> Any:
"""simple docstring"""
_snake_case , _snake_case , _snake_case , _snake_case = _find_text_in_file(
filename=os.path.join(_UpperCamelCase , _UpperCamelCase ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
_snake_case = get_model_list_for_task(_UpperCamelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_UpperCamelCase , _UpperCamelCase ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
" to fix this." )
if __name__ == "__main__":
UpperCamelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCamelCase_ : Dict = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 185 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
UpperCamelCase_ : Tuple = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
UpperCamelCase_ : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
UpperCamelCase_ : Dict = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __a ( _UpperCamelCase: Tuple , _UpperCamelCase: int ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def __a ( _UpperCamelCase: Union[str, Any] , _UpperCamelCase: Optional[int] , _UpperCamelCase: int="binary" ) -> Any:
"""simple docstring"""
_snake_case = simple_accuracy(_UpperCamelCase , _UpperCamelCase )
_snake_case = float(fa_score(y_true=_UpperCamelCase , y_pred=_UpperCamelCase , average=_UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __a ( _UpperCamelCase: str , _UpperCamelCase: Any ) -> Tuple:
"""simple docstring"""
_snake_case = {}
for id_pred, label in zip(_UpperCamelCase , _UpperCamelCase ):
_snake_case = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
_snake_case = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_snake_case = [(pred, label)]
_snake_case , _snake_case = [], []
for question, preds_labels in question_map.items():
_snake_case , _snake_case = zip(*_UpperCamelCase )
_snake_case = fa_score(y_true=_UpperCamelCase , y_pred=_UpperCamelCase , average="macro" )
fas.append(_UpperCamelCase )
_snake_case = int(sum(pred == label for pred, label in preds_labels ) == len(_UpperCamelCase ) )
ems.append(_UpperCamelCase )
_snake_case = float(sum(_UpperCamelCase ) / len(_UpperCamelCase ) )
_snake_case = sum(_UpperCamelCase ) / len(_UpperCamelCase )
_snake_case = float(fa_score(y_true=_UpperCamelCase , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def _lowercase ( self ) -> Union[str, Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None ,)
def _lowercase ( self ) -> Union[str, Any]:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,fa_avg="macro" )
elif self.config_name == "record":
_snake_case = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
_snake_case = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 185 | 1 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case :
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=13 , UpperCamelCase_ : Tuple=7 , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]=99 , UpperCamelCase_ : Optional[int]=24 , UpperCamelCase_ : int=2 , UpperCamelCase_ : List[str]=6 , UpperCamelCase_ : int=37 , UpperCamelCase_ : Tuple="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : List[str]=512 , UpperCamelCase_ : Any=16 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : Any=0.0_2 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : List[Any]=1000 , ):
lowerCAmelCase_ : Any =parent
lowerCAmelCase_ : Any =batch_size
lowerCAmelCase_ : Optional[Any] =seq_length
lowerCAmelCase_ : Tuple =is_training
lowerCAmelCase_ : str =use_input_mask
lowerCAmelCase_ : Union[str, Any] =use_token_type_ids
lowerCAmelCase_ : Any =use_labels
lowerCAmelCase_ : Tuple =vocab_size
lowerCAmelCase_ : Any =hidden_size
lowerCAmelCase_ : Union[str, Any] =num_hidden_layers
lowerCAmelCase_ : Dict =num_attention_heads
lowerCAmelCase_ : Optional[Any] =intermediate_size
lowerCAmelCase_ : str =hidden_act
lowerCAmelCase_ : Union[str, Any] =hidden_dropout_prob
lowerCAmelCase_ : Optional[int] =attention_probs_dropout_prob
lowerCAmelCase_ : Any =max_position_embeddings
lowerCAmelCase_ : str =type_vocab_size
lowerCAmelCase_ : Optional[Any] =type_sequence_label_size
lowerCAmelCase_ : Optional[int] =initializer_range
lowerCAmelCase_ : Optional[int] =num_labels
lowerCAmelCase_ : List[str] =scope
lowerCAmelCase_ : List[str] =range_bbox
def __A ( self : Any ):
lowerCAmelCase_ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : Any =ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase_ : Union[str, Any] =bbox[i, j, 3]
lowerCAmelCase_ : Any =bbox[i, j, 1]
lowerCAmelCase_ : Any =t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase_ : int =bbox[i, j, 2]
lowerCAmelCase_ : Optional[Any] =bbox[i, j, 0]
lowerCAmelCase_ : int =t
lowerCAmelCase_ : Any =None
if self.use_input_mask:
lowerCAmelCase_ : Dict =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCAmelCase_ : int =None
if self.use_token_type_ids:
lowerCAmelCase_ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : List[Any] =None
lowerCAmelCase_ : Any =None
if self.use_labels:
lowerCAmelCase_ : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Dict =self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __A ( self : str ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __A ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , ):
lowerCAmelCase_ : Any =LiltModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ : List[Any] =model(lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
lowerCAmelCase_ : List[Any] =model(lowercase_ , bbox=lowercase_ , token_type_ids=lowercase_ )
lowerCAmelCase_ : str =model(lowercase_ , bbox=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : int , ):
lowerCAmelCase_ : Union[str, Any] =self.num_labels
lowerCAmelCase_ : Optional[int] =LiltForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ : List[Any] =model(
lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , ):
lowerCAmelCase_ : Optional[Any] =LiltForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ : str =model(
lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] =self.prepare_config_and_inputs()
(
lowerCAmelCase_
) : int =config_and_inputs
lowerCAmelCase_ : Optional[int] ={
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class _snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Optional[int] = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : List[str] = False
_UpperCamelCase : List[str] = False
def __A ( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Any ):
return True
def __A ( self : Tuple ):
lowerCAmelCase_ : List[str] =LiltModelTester(self )
lowerCAmelCase_ : List[Any] =ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def __A ( self : int ):
self.config_tester.run_common_tests()
def __A ( self : List[str] ):
lowerCAmelCase_ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __A ( self : Union[str, Any] ):
lowerCAmelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ : List[str] =type
self.model_tester.create_and_check_model(*lowercase_ )
def __A ( self : int ):
lowerCAmelCase_ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
def __A ( self : Tuple ):
lowerCAmelCase_ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
@slow
def __A ( self : str ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Tuple =LiltModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
@slow
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ):
lowerCAmelCase_ : Dict =LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(lowercase_ )
lowerCAmelCase_ : Optional[int] =torch.tensor([[1, 2]] , device=lowercase_ )
lowerCAmelCase_ : Any =torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : List[str] =model(input_ids=lowercase_ , bbox=lowercase_ )
lowerCAmelCase_ : Union[str, Any] =torch.Size([1, 2, 768] )
lowerCAmelCase_ : Dict =torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowercase_ , )
self.assertTrue(outputs.last_hidden_state.shape , lowercase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase_ , atol=1E-3 ) )
| 708 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
_UpperCamelCase : UNetaDModel
_UpperCamelCase : ScoreSdeVeScheduler
def __init__( self : Union[str, Any] , UpperCamelCase_ : UNetaDModel , UpperCamelCase_ : ScoreSdeVeScheduler ):
super().__init__()
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase_ : int = 1 , UpperCamelCase_ : int = 2000 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , **UpperCamelCase_ : Dict , ):
lowerCAmelCase_ : Union[str, Any] =self.unet.config.sample_size
lowerCAmelCase_ : Dict =(batch_size, 3, img_size, img_size)
lowerCAmelCase_ : Dict =self.unet
lowerCAmelCase_ : Optional[int] =randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ ) * self.scheduler.init_noise_sigma
lowerCAmelCase_ : Any =sample.to(self.device )
self.scheduler.set_timesteps(UpperCamelCase_ )
self.scheduler.set_sigmas(UpperCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCAmelCase_ : Optional[Any] =self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowerCAmelCase_ : int =self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
lowerCAmelCase_ : List[str] =self.scheduler.step_correct(UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# prediction step
lowerCAmelCase_ : Any =model(UpperCamelCase_ , UpperCamelCase_ ).sample
lowerCAmelCase_ : Tuple =self.scheduler.step_pred(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ )
lowerCAmelCase_ , lowerCAmelCase_ : Tuple =output.prev_sample, output.prev_sample_mean
lowerCAmelCase_ : Tuple =sample_mean.clamp(0 , 1 )
lowerCAmelCase_ : int =sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ : Optional[Any] =self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 305 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Tuple =VideoToVideoSDPipeline
a : Optional[int] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
a : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
a : int =PipelineTesterMixin.required_optional_params - {"""latents"""}
a : Dict =False
# No `output_type`.
a : List[Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""),up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""),cross_attention_dim=32,attention_head_dim=4,)
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085,beta_end=0.012,beta_schedule="""scaled_linear""",clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,)
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],latent_channels=4,sample_size=1_28,)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,hidden_act="""gelu""",projection_dim=5_12,)
__lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = VideoToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """np"""
__lowerCAmelCase = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__lowerCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__lowerCAmelCase = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(),reason="""XFormers attention is only available with CUDA and `xformers` installed""",)
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE,expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""",torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase = torch.randn((1, 10, 3, 10_24, 5_76),generator=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = video.to("""cuda""" )
__lowerCAmelCase = """Spiderman is surfing"""
__lowerCAmelCase = pipe(__SCREAMING_SNAKE_CASE,video=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=3,output_type="""pt""" ).frames
__lowerCAmelCase = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 689 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_a : Optional[int] = logging.get_logger(__name__)
_a : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase_ )} )
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
a : int =field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : int =field(
default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
a : int =field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
a : int =field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
a : float =field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a : int =field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
a : int =field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Optional[Any] ="""train"""
a : Optional[int] ="""dev"""
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : SquadDataTrainingArguments
a : List[SquadFeatures]
a : Split
a : bool
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = Split.train,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "pt",):
'''simple docstring'''
__lowerCAmelCase = args
__lowerCAmelCase = is_language_sensitive
__lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__lowerCAmelCase = mode
# Load data features from cache or dataset file
__lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
__lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
__lowerCAmelCase = time.time()
__lowerCAmelCase = torch.load(__SCREAMING_SNAKE_CASE )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowerCAmelCase = self.old_features["""features"""]
__lowerCAmelCase = self.old_features.get("""dataset""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.old_features.get("""examples""",__SCREAMING_SNAKE_CASE )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
__lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
__lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
__lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__SCREAMING_SNAKE_CASE,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},__SCREAMING_SNAKE_CASE,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.features[i]
__lowerCAmelCase = torch.tensor(feature.input_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.attention_mask,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.token_type_ids,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.cls_index,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.p_mask,dtype=torch.float )
__lowerCAmelCase = torch.tensor(feature.is_impossible,dtype=torch.float )
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowerCAmelCase = torch.tensor(feature.start_position,dtype=torch.long )
__lowerCAmelCase = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
| 689 | 1 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}

def digits_fifth_powers_sum(number: int) -> int:
    """Sum of the fifth powers of the digits of number."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))

def solution() -> int:
    """Project Euler 30: sum of all numbers equal to the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(10_00, 1_00_00_00)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
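# Illustrative check (added sketch): 4150 = 4**5 + 1**5 + 5**5 + 0**5, so it is one of the
# numbers counted by solution(); the six such numbers sum to 443839 (Project Euler 30).
if __name__ == "__main__":
    assert digits_fifth_powers_sum(4150) == 4150
    assert solution() == 443839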
| 355 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if n reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]

def solution(limit: int = 1_00_00_00) -> int:
    """Sum the numbers below limit that are palindromic in base 10 and in base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
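# Illustrative check (added sketch): 585 is a palindrome in base 10 and its binary form
# 1001001001 is also a palindrome, so it is one of the numbers summed by solution().
if __name__ == "__main__":
    assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])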
| 355 | 1 |
def method_a(boundary: list, steps: float) -> float:
    """Extended trapezoidal rule: integral(f) ~ h * (f(a)/2 + f(x_1) + ... + f(b)/2)."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y

def make_points(a: float, b: float, h: float):
    """Yield the interior sample points of the partition (the endpoints are handled separately)."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h

def f(x: float) -> float:  # enter your function here
    """Integrand, f(x) = x**2 by default."""
    y = (x - 0) * (x - 0)
    return y

def main() -> None:
    """Integrate f over [0, 1] with 10 steps and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
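# Illustrative check (added sketch): for the default integrand f(x) = x**2 on [0, 1] with
# 10 steps, the extended trapezoidal rule above returns ~0.335, close to the exact value 1/3.
# (The precise float result depends on how make_points accumulates the step size.)
if __name__ == "__main__":
    approximation = method_a([0.0, 1.0], 10.0)
    assert abs(approximation - 1.0 / 3.0) < 5e-3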
| 70 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Counter-based check: at most one character may occur an odd number of times."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2

def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Dictionary-based check of the character frequencies."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True

def benchmark(input_str: str = "") -> None:
    """Benchmark both implementations on the same input string."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()", "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str), "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()", "\tans =",
        can_string_be_rearranged_as_palindrome(input_str), "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )

if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
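# Illustrative behaviour (added sketch): "Momo" normalises to "momo" with counts {m: 2, o: 2},
# so both checks return True; "python" has six characters with odd counts and returns False.
if __name__ == "__main__":
    assert can_string_be_rearranged_as_palindrome_counter("Momo")
    assert not can_string_be_rearranged_as_palindrome("python")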
| 70 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
__a : Union[str, Any] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase )
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = tmp_path_factory.getbasetemp() / '''cache'''
__lowercase = test_hf_cache_home / '''datasets'''
__lowercase = test_hf_cache_home / '''metrics'''
__lowercase = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase ) )
__lowercase = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase ) )
__lowercase = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase ) )
@pytest.fixture(autouse=lowercase , scope='''session''' )
def UpperCAmelCase ( ):
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase )
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase )
@pytest.fixture
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase )
 | 522 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : int = (EulerDiscreteScheduler,)
__a : Any = 10
def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
__lowercase = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCAmelCase__ )
return config
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowerCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type='''v_prediction''' )
__lowercase = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowerCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowercase = sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowerCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
__lowercase = torch.manual_seed(0 )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__lowercase = sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(lowerCAmelCase__ ) )
__lowercase = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
        assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
 | 522 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCAmelCase__ :
"""simple docstring"""
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.get_dummy_input()
@property
def __lowerCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
    def __lowerCAmelCase ( self : Dict , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ) -> str:
'''simple docstring'''
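        # builds the dummy forward-pass inputs; temb, residual hidden states, encoder hidden states
        # and a skip sample are only included when the corresponding flag is set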
a__ : List[Any] = 4
a__ : Dict = 3_2
a__ : int = (3_2, 3_2)
a__ : Union[str, Any] = torch.manual_seed(0 )
a__ : Union[str, Any] = torch.device(A__ )
a__ : Optional[int] = (batch_size, num_channels) + sizes
a__ : Optional[Any] = randn_tensor(A__ , generator=A__ , device=A__ )
a__ : int = {'''hidden_states''': hidden_states}
if include_temb:
a__ : List[Any] = 1_2_8
a__ : List[str] = randn_tensor((batch_size, temb_channels) , generator=A__ , device=A__ )
if include_res_hidden_states_tuple:
a__ : str = torch.manual_seed(1 )
a__ : int = (randn_tensor(A__ , generator=A__ , device=A__ ),)
if include_encoder_hidden_states:
a__ : List[Any] = floats_tensor((batch_size, 3_2, 3_2) ).to(A__ )
if include_skip_sample:
a__ : Optional[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=A__ , device=A__ )
return dummy_input
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = {
'''in_channels''': 3_2,
'''out_channels''': 3_2,
'''temb_channels''': 1_2_8,
}
if self.block_type == "up":
a__ : int = 3_2
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
a__ : Tuple = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self : Dict , A__ : Any ) -> str:
'''simple docstring'''
        a__ , a__ = self.prepare_init_args_and_inputs_for_common()
a__ : Union[str, Any] = self.block_class(**A__ )
unet_block.to(A__ )
unet_block.eval()
with torch.no_grad():
a__ : Tuple = unet_block(**A__ )
if isinstance(A__ , A__ ):
a__ : Dict = output[0]
self.assertEqual(output.shape , self.output_shape )
a__ : Optional[int] = output[0, -1, -3:, -3:]
a__ : List[str] = torch.tensor(A__ ).to(A__ )
assert torch_all_close(output_slice.flatten() , A__ , atol=5E-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
        a__ , a__ = self.prepare_init_args_and_inputs_for_common()
a__ : Optional[int] = self.block_class(**A__ )
model.to(A__ )
model.train()
a__ : Dict = model(**A__ )
if isinstance(A__ , A__ ):
a__ : Tuple = output[0]
a__ : Dict = torch.device(A__ )
a__ : Union[str, Any] = randn_tensor(output.shape , device=A__ )
a__ : str = torch.nn.functional.mse_loss(A__ , A__ )
loss.backward()
| 688 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( SchedulerCommonTest ):
"""simple docstring"""
__UpperCamelCase = (KDPMaDiscreteScheduler,)
__UpperCamelCase = 10
def __lowerCAmelCase ( self : Optional[Any] , **A__ : Optional[int] ) -> int:
'''simple docstring'''
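        # default scheduler config shared by the tests below; individual fields can be overridden via kwargs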
        config = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**A__ )
return config
def __lowerCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A__ )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A__ )
def __lowerCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
a__ : Any = self.scheduler_classes[0]
a__ : str = self.get_scheduler_config(prediction_type='''v_prediction''' )
a__ : Dict = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : Tuple = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Dict = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : Optional[Any] = scheduler.scale_model_input(A__ , A__ )
a__ : Union[str, Any] = model(A__ , A__ )
a__ : List[str] = scheduler.step(A__ , A__ , A__ )
a__ : Optional[Any] = output.prev_sample
a__ : Tuple = torch.sum(torch.abs(A__ ) )
a__ : Optional[int] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
if torch_device == "mps":
return
a__ : List[Any] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : Tuple = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps )
a__ : List[Any] = self.dummy_model()
a__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
a__ : Any = sample.to(A__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : str = scheduler.scale_model_input(A__ , A__ )
a__ : List[str] = model(A__ , A__ )
a__ : str = scheduler.step(A__ , A__ , A__ )
a__ : List[Any] = output.prev_sample
a__ : Dict = torch.sum(torch.abs(A__ ) )
a__ : Optional[Any] = torch.mean(torch.abs(A__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def __lowerCAmelCase ( self : str ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
a__ : Optional[int] = self.scheduler_classes[0]
a__ : Tuple = self.get_scheduler_config()
a__ : List[Any] = scheduler_class(**A__ )
scheduler.set_timesteps(self.num_inference_steps , device=A__ )
a__ : Union[str, Any] = self.dummy_model()
a__ : List[Any] = self.dummy_sample_deter.to(A__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
a__ : Optional[int] = scheduler.scale_model_input(A__ , A__ )
a__ : List[Any] = model(A__ , A__ )
a__ : Any = scheduler.step(A__ , A__ , A__ )
a__ : List[str] = output.prev_sample
a__ : Any = torch.sum(torch.abs(A__ ) )
a__ : Union[str, Any] = torch.mean(torch.abs(A__ ) )
if str(A__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
| 688 | 1 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCamelCase ( AbstractDatasetReader ):
'''simple docstring'''
    def __init__( self, path_or_paths: NestedDataStructureLike[PathLike] , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
def lowerCAmelCase_ ( self: List[str] ) -> int:
# Build iterable dataset
if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
| 713 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
__a = get_logger(__name__)
class _PatchedModuleObj :
'''simple docstring'''
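    # wraps a module and copies its attributes onto the wrapper so single attributes can be
    # overridden without touching the real module; the wrapped module is kept in ``_original_module``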
    def __init__( self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("""__""" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class lowerCamelCase :
'''simple docstring'''
    _active_patches = []
    def __init__( self , obj , target , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(""".""" )[0]
        self.original = {}
        self.attrs = attrs or []
def __enter__( self: Dict ) -> Optional[Any]:
        *submodules, target_attr = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(""".""".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
                attr_value = getattr(import_module(""".""".join(submodules ) ) , target_attr )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
            self.original[target_attr] = globals()["""__builtins__"""][target_attr]
            setattr(self.obj , target_attr , self.new )
else:
raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" )
def __exit__( self: List[Any] , *snake_case: List[Any] ) -> int:
for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
def lowerCAmelCase_ ( self: List[str] ) -> int:
self.__enter__()
self._active_patches.append(self )
def lowerCAmelCase_ ( self: int ) -> Optional[Any]:
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 310 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __UpperCamelCase ( unittest.TestCase , ToolTesterMixin ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = load_tool('text-to-speech' )
self.tool.setup()
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] ,torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) ,) )
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] ,torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) ,) )
| 259 |
"""simple docstring"""
def least_divisible_repunit( divisor ):
'''simple docstring'''
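    # A(divisor): length of the smallest repunit 11...1 divisible by ``divisor``, computed with the
    # recurrence R(k + 1) = (10 * R(k) + 1) mod divisor; returns 0 when divisor shares a factor with 10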
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def solution( limit = 1000000 ):
'''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
| 259 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
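# End-to-end test: writes a tiny dummy seq2seq dataset to a temporary directory, launches
# finetune_rag.py in a subprocess and checks the metrics.json it produces.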
class __magic_name__ ( TestCasePlus ):
"""simple docstring"""
    def _create_dummy_data ( self , data_dir ):
        os.makedirs(data_dir , exist_ok=True )
        contents = {'''source''': '''What is love ?''', '''target''': '''life'''}
        n_lines = {'''train''': 12, '''val''': 2, '''test''': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '''\n'''.join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f'''{split}.{field}''' ) , '''w''' ) as f:
                    f.write(content )
    def _run_finetune ( self , gpus , distributed_retriever = "pytorch" ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , '''output''' )
        data_dir = os.path.join(tmp_dir , '''data''' )
        self._create_dummy_data(data_dir=data_dir )
        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_json_file = os.path.join(output_dir , '''metrics.json''' )
        with open(metrics_json_file ) as f:
            result = json.load(f )
        return result
@require_torch_gpu
def _UpperCAmelCase ( self ):
_lowerCamelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
def _UpperCAmelCase ( self ):
_lowerCamelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_gpu
@require_ray
def _UpperCAmelCase ( self ):
_lowerCamelCase = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _UpperCAmelCase ( self ):
_lowerCamelCase = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
| 297 | 0 |
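# Project Euler 145: count "reversible" numbers n (no leading or trailing zero) for which every
# digit of n + reverse(n) is odd; digits are filled pairwise from the outside in, with the carry
# of the running digit sums tracked in ``remainder``.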
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers ( remaining_length : int , remainder : int , digits : list[int] , length : int ) -> int:
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result
    result = 0
    for digita in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digita
        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digita + digitb) // 10 , digits , length , )
return result
def solution ( max_power : int = 9 ) -> int:
    """simple docstring"""
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
return result
if __name__ == "__main__":
print(F'{solution() = }')
| 15 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs ( word ):
"""simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 10_24}
class snake_case__ ( PreTrainedTokenizer ):
UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding="""utf-8""" ) as merges_handle:
                merges = merges_handle.read().split("""\n""" )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}
@property
def A ( self ) -> int:
"""simple docstring"""
return len(self.decoder )
def A ( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe ( self , token ):
        """simple docstring"""
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = """\n""" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , """""" )
        word = word.replace(""" """ , BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word
    def A ( self , text ):
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
"""This tokenizer was instantiated without a `merges.txt` file, so"""
""" that it can only be used for decoding, not for encoding."""
"""Make sure to provide `merges.txt` file at instantiation to enable """
"""encoding.""" )
if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
return split_tokens
def A ( self , UpperCamelCase_ ) -> int:
"""simple docstring"""
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def A ( self , UpperCamelCase_ ) -> str:
"""simple docstring"""
        result = self.decoder.get(UpperCamelCase_ , self.unk_token )
return result
def A ( self , UpperCamelCase_ ) -> str:
"""simple docstring"""
        string = """ """.join(UpperCamelCase_ )
        # make sure @@ tokens are concatenated
        string = """""".join(string.split(BPE_TOKEN_VOCAB ) )
return string
    def A ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , """w""" , encoding="""utf-8""" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return (vocab_file, merges_file)
| 419 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
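# Reshard a fairseq NLLB-MoE checkpoint (one file per expert rank plus a shared file) into
# Hugging Face style weight shards with an accompanying index, then reload it as a sanity check.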
def remove_ignore_keys_ ( state_dict ):
    ignore_keys = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb ( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys ( state_dict , expert_idx=None ):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("""moe_layer.experts.0""" , f'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
        if "gate" in key:
            key = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
        if "fc2" and "experts" not in key:
            key = key.replace(""".fc2.""" , """.ffn.fc2.""" )
        if "fc1" and "experts" not in key:
            key = key.replace(""".fc1.""" , """.ffn.fc1.""" )
        if ".encoder_attn." in key:
            key = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
        if "final_layer_norm" in key:
            key = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
        new_dict[key] = state_dict[old_key]
return new_dict
def shard_on_the_fly ( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name : str = WEIGHTS_NAME ):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + f'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["""model"""]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace(""".bin""" , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(""".bin""" , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    shared_weights = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights["""shared.weight"""] = shared_weights["""decoder.embed_tokens.weight"""]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(""".bin""" , f'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace(""".bin""" , f'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"""total_size""": total_size}
    index = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , """w""" , encoding="""utf-8""" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + """\n"""
        f.write(content )
return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata , index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 171 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class SCREAMING_SNAKE_CASE_ (unittest.TestCase ):
'''simple docstring'''
_a = MODEL_FOR_CAUSAL_LM_MAPPING
_a = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _lowerCAmelCase ( self : Optional[Any] ) ->int:
lowerCamelCase_ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ : List[str] = text_generator("""This is a test""" , do_sample=__a )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
lowerCamelCase_ : List[str] = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__a , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
lowerCamelCase_ : Any = text_generator("""This is a test""" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{"""generated_token_ids""": ANY(__a )},
{"""generated_token_ids""": ANY(__a )},
] , )
lowerCamelCase_ : List[Any] = text_generator.model.config.eos_token_id
lowerCamelCase_ : Tuple = """<pad>"""
lowerCamelCase_ : Union[str, Any] = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{"""generated_token_ids""": ANY(__a )},
{"""generated_token_ids""": ANY(__a )},
],
[
{"""generated_token_ids""": ANY(__a )},
{"""generated_token_ids""": ANY(__a )},
],
] , )
@require_tf
def _lowerCAmelCase ( self : Union[str, Any] ) ->int:
lowerCamelCase_ : int = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
lowerCamelCase_ : Dict = text_generator("""This is a test""" , do_sample=__a )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
lowerCamelCase_ : str = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__a )
self.assertEqual(
__a , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def _lowerCAmelCase ( self : int , __a : Any , __a : List[str] , __a : Optional[Any] ) ->Tuple:
lowerCamelCase_ : List[str] = TextGenerationPipeline(model=__a , tokenizer=__a )
return text_generator, ["This is a test", "Another test"]
def _lowerCAmelCase ( self : int ) ->Optional[int]:
lowerCamelCase_ : str = """Hello I believe in"""
lowerCamelCase_ : List[Any] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
lowerCamelCase_ : Dict = text_generator(__a )
self.assertEqual(
__a , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
lowerCamelCase_ : Union[str, Any] = text_generator(__a , stop_sequence=""" fe""" )
self.assertEqual(__a , [{"""generated_text""": """Hello I believe in fe"""}] )
def _lowerCAmelCase ( self : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) ->Tuple:
lowerCamelCase_ : int = text_generator.model
lowerCamelCase_ : Any = text_generator.tokenizer
lowerCamelCase_ : Optional[Any] = text_generator("""This is a test""" )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
lowerCamelCase_ : Any = text_generator("""This is a test""" , return_full_text=__a )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
lowerCamelCase_ : Optional[Any] = pipeline(task="""text-generation""" , model=__a , tokenizer=__a , return_full_text=__a )
lowerCamelCase_ : Any = text_generator("""This is a test""" )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
lowerCamelCase_ : Any = text_generator("""This is a test""" , return_full_text=__a )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
lowerCamelCase_ : List[str] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCamelCase_ : List[str] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
] , )
with self.assertRaises(__a ):
lowerCamelCase_ : Dict = text_generator("""test""" , return_full_text=__a , return_text=__a )
with self.assertRaises(__a ):
lowerCamelCase_ : Union[str, Any] = text_generator("""test""" , return_full_text=__a , return_tensors=__a )
with self.assertRaises(__a ):
lowerCamelCase_ : Optional[Any] = text_generator("""test""" , return_text=__a , return_tensors=__a )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCamelCase_ : str = text_generator("""""" )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCamelCase_ : Union[str, Any] = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCamelCase_ : str = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
lowerCamelCase_ : Optional[int] = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__a ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
import torch
# Classic `model_kwargs`
lowerCamelCase_ : Any = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCamelCase_ : int = pipe("""This is a test""" )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowerCamelCase_ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCamelCase_ : str = pipe("""This is a test""" )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCamelCase_ : List[str] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCamelCase_ : str = pipe("""This is a test""" )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def _lowerCAmelCase ( self : Dict ) ->Optional[Any]:
import torch
lowerCamelCase_ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def _lowerCAmelCase ( self : List[str] ) ->Optional[int]:
import torch
lowerCamelCase_ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=__a , top_p=0.5 )
def _lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
lowerCamelCase_ : Optional[int] = """Hello world"""
lowerCamelCase_ : Optional[Any] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
lowerCamelCase_ : str = logging.get_logger("""transformers.generation.tf_utils""" )
else:
lowerCamelCase_ : int = logging.get_logger("""transformers.generation.utils""" )
lowerCamelCase_ : List[Any] = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__a ) as cl:
lowerCamelCase_ : Any = text_generator(__a , max_length=10 , max_new_tokens=1 )
self.assertIn(__a , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__a ) as cl:
lowerCamelCase_ : int = text_generator(__a , max_new_tokens=1 )
self.assertNotIn(__a , cl.out )
with CaptureLogger(__a ) as cl:
lowerCamelCase_ : Optional[int] = text_generator(__a , max_length=10 )
self.assertNotIn(__a , cl.out )
| 171 | 1 |