from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clap'] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure['feature_extraction_clap'] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
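
# Illustrative usage sketch (not part of the module above): with the lazy-module
# pattern, importing the package stays cheap because `modeling_clap` is only
# imported the first time one of its attributes is accessed. Assumes a
# torch-enabled install of `transformers`.
if __name__ == "__main__":
    from transformers import ClapConfig, ClapModel  # each name is resolved lazily on first access

    model = ClapModel(ClapConfig())
    print(type(model).__name__)  # ClapModel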
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
SCREAMING_SNAKE_CASE__ = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        '\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>'
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)

    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
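
# Illustrative, self-contained sketch (not part of the fixtures module) of the
# pattern every fixture above follows: a session-scoped fixture writes a file
# once via pytest's built-in tmp_path_factory, and any test that names the
# fixture as a parameter receives the path. The fixture and test names below
# are hypothetical.
import pytest


@pytest.fixture(scope="session")
def greeting_path(tmp_path_factory):
    # Created once per test session, then shared by every test that requests it.
    path = tmp_path_factory.mktemp("data") / "greeting.txt"
    path.write_text("hello\n")
    return str(path)


def test_greeting_file(greeting_path):
    with open(greeting_path) as f:
        assert f.read() == "hello\n"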
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
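
# Minimal sketch (illustrative, not part of the test file) of the shortest-edge
# resizing rule that get_expected_values mirrors: the shorter image side is
# scaled to `shortest_edge` and the other side keeps the aspect ratio.
def expected_resized_shape(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


# e.g. expected_resized_shape(400, 200) == (36, 18)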
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
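
# Usage sketch (illustrative): solve x + 2y = 3 and 2x + y = 3, whose unique
# solution is x = 1, y = 1. Each equation is passed as [a, b, c] for ax + by = c.
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # (1.0, 1.0)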
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
    print(F'{solution() = }')
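
# Sanity check (illustrative, not part of the original solution): the Project
# Euler 116 statement gives 7 + 3 + 2 = 12 single-colour tilings for a row of
# five units, so solution(5) should return 12.
if __name__ == "__main__":
    assert solution(5) == 12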
"""simple docstring"""
import math
def __lowercase ( a : int ) -> bool:
return math.sqrt(a ) * math.sqrt(a ) == num
def __lowercase ( a : int ) -> bool:
__snake_case : int =0
__snake_case : List[str] =n
while left <= right:
__snake_case : Tuple =(left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
__snake_case : int =mid - 1
else:
__snake_case : Tuple =mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
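
# Quick usage sketch (illustrative): both implementations agree on small inputs;
# note the float version can be fooled by rounding for very large inputs, which
# is the point of the integer binary-search variant.
if __name__ == "__main__":
    for value in (16, 26, 144):
        print(value, perfect_square(value), perfect_square_binary_search(value))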
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
UpperCamelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
UpperCamelCase_ : Optional[Any] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
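
# Example invocation (illustrative; the script name and paths are placeholders):
#
#     python convert_vae_pt_to_diffusers.py \
#         --vae_pt_path ./checkpoints/vae.pt \
#         --dump_path ./converted_vae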
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"""{solution() = }""")
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
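
# Quick numeric sketch (illustrative): this is the common sigmoid approximation
# to GELU; it is 0 at 0, near-identity for large positive inputs, and small and
# negative for negative inputs (approximately [-0.064, 0., 1.936] below).
if __name__ == "__main__":
    print(gaussian_error_linear_unit(np.array([-2.0, 0.0, 2.0])))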
import datasets
from .evaluate import evaluate
a_ = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
a_ = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
a_ = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
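
# Usage sketch (illustrative): water at roughly 998 kg/m^3 with a bulk modulus
# of about 2.15e9 Pa gives a speed of sound near 1467 m/s.
if __name__ == "__main__":
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))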
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    infos: dict


class ServeTokenizeResult(BaseModel):
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    text: str


class ServeForwardResult(BaseModel):
    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        clean_up_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, clean_up_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
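
# Example invocation (illustrative): once transformers is installed with the
# [serving] extra, the command below exposes GET / plus POST /tokenize,
# /detokenize and /forward on localhost:8888.
#
#     transformers-cli serve --task sentiment-analysis --host localhost --port 8888 --workers 1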
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase_ ( self : Dict , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : int ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = GPTNeoXJapaneseForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# first forward pass
UpperCamelCase = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
UpperCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase = model(__magic_name__ , attention_mask=__magic_name__ , output_hidden_states=__magic_name__ )
UpperCamelCase = output_from_no_past["""hidden_states"""][0]
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ , output_hidden_states=__magic_name__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
lowercase = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowercase = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowercase = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = GPTNeoXJapaneseModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=__magic_name__ , hidden_size=3_7 )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase = None
self.model_tester.create_and_check_model_as_decoder(__magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__magic_name__ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = """abeja/gpt-neox-japanese-2.7b"""
UpperCamelCase = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
UpperCamelCase = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
UpperCamelCase = GPTNeoXJapaneseTokenizer.from_pretrained(__magic_name__ )
UpperCamelCase = GPTNeoXJapaneseForCausalLM.from_pretrained(__magic_name__ )
UpperCamelCase = []
for prompt in prompts:
UpperCamelCase = tokenizer(__magic_name__ , return_tensors="""pt""" ).input_ids
UpperCamelCase = model.generate(__magic_name__ , max_length=5_0 )
UpperCamelCase = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
predicted_outputs += generated_string
self.assertListEqual(__magic_name__ , __magic_name__ )
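

# Added sketch: exercising the tiny configuration above outside the unittest
# harness; only objects already imported in this file are assumed.
def _example_tiny_forward():
    config = GPTNeoXJapaneseConfig(
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
    )
    model = GPTNeoXJapaneseModel(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 7))
    with torch.no_grad():
        output = model(input_ids)
    return output.last_hidden_state.shape  # torch.Size([1, 7, 32])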
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    """Check that `source` is within 1% of `target`."""
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_dir):
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_351_563,
"""num_examples""": 10_000,
},
{
"""name""": """validation""",
"""num_bytes""": 238_418,
"""num_examples""": 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
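

# Added sketch: the fast test above as a standalone run; the hyperparameters
# are copied from dummy_uncond_unet and are assumptions of this example only.
def _example_karras_ve_sample():
    torch.manual_seed(0)
    unet = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
    image = pipe(num_inference_steps=2, output_type="numpy").images
    return image.shape  # (1, 32, 32, 3)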
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
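

# Added sketch: running the processor on an arbitrary image, outside the COCO
# fixtures the integration tests rely on; the checkpoint name is the one the
# tests above already reference.
def _example_conditional_detr_preprocess():
    processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
    dummy_image = np.zeros((64, 64, 3), dtype=np.uint8)  # HWC dummy image
    pixel_values = processor(images=dummy_image, return_tensors="pt")["pixel_values"]
    return pixel_values.shape  # resized so the shortest edge is 800 by default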
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using the union-by-rank heuristic; return True if successful."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
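

# Added usage sketch for the DisjointSet above: three sets of sizes 1, 2 and 3;
# merging tracks the largest joined set via `max_set`.
def _example_disjoint_set():
    ds = DisjointSet([1, 2, 3])
    ds.merge(0, 1)  # sets 0 and 1 join, size 3
    ds.merge(1, 2)  # the joined set absorbs set 2, size 6
    return ds.max_set  # 6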
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    r"""
    Constructs a document image processor: optional resizing, a BGR channel flip
    (as Detectron2 expects), and optional Tesseract OCR producing words plus
    normalized bounding boxes.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
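

# Added sketch: preprocessing a document image with the processor above.
# pytesseract plus the Tesseract binary must be installed for apply_ocr=True;
# the file name is a placeholder.
def _example_document_preprocess():
    image = PIL.Image.open("document.png").convert("RGB")  # placeholder path
    processor = LayoutLMv2ImageProcessor()  # apply_ocr=True by default
    encoding = processor(image, return_tensors="np")
    return encoding["pixel_values"].shape, encoding["words"], encoding["boxes"]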
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
"""Alias for the lock, which should be used for the current platform."""

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
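

# Added usage sketch: FileLock resolves to the right implementation for the
# current platform; the lock path is a placeholder.
def _example_file_lock():
    lock = FileLock("shared_resource.txt.lock", timeout=5)
    with lock:  # raises Timeout if not acquired within 5 seconds
        with open("shared_resource.txt", "a") as f:
            f.write("exclusive write\n")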
"""Constants shared across the diffusers library."""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """
    Return the set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
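

# Added sketch: the tokenizer above is normally loaded from the published
# checkpoint rather than from raw vocab/merges files.
def _example_blenderbot_small_tokenizer():
    tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    ids = tokenizer("sam is a great name. it means 'listener'.").input_ids
    return tokenizer.decode(ids)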
"""simple docstring"""
lowerCamelCase__ : Dict = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
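
# Downstream code can mirror the availability guards above instead of relying on
# the dummy-object fallbacks (a minimal sketch; loading the checkpoint needs the
# torch/transformers extras installed plus network access, and the model id is
# just a common example, not something referenced in this file):
#
#     from diffusers.utils import is_torch_available, is_transformers_available
#
#     if is_torch_available() and is_transformers_available():
#         from diffusers import StableDiffusionPipeline
#
#         pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")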
| 716 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
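
# Quick sanity checks (a sketch; the expected values follow from the well-known
# Project Euler 33 result, where 16/64, 19/95, 26/65 and 49/98 multiply to 1/100,
# rather than from anything printed by this file):
#
#     >>> sorted(fraction_list(2))
#     ['16/64', '19/95', '26/65', '49/98']
#     >>> solution()
#     100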
| 18 | 0 |
"""simple docstring"""
class lowerCamelCase__ :
def __init__( self : Optional[Any] , A_ : Dict , A_ : str , A_ : Any ):
'''simple docstring'''
__lowercase = None
__lowercase = None
__lowercase = graph
self._normalize_graph(A_ , A_ )
__lowercase = len(A_ )
__lowercase = None
def SCREAMING_SNAKE_CASE_ ( self : Dict , A_ : str , A_ : List[Any] ):
'''simple docstring'''
if sources is int:
__lowercase = [sources]
if sinks is int:
__lowercase = [sinks]
if len(A_ ) == 0 or len(A_ ) == 0:
return
__lowercase = sources[0]
__lowercase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(A_ ) > 1 or len(A_ ) > 1:
__lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__lowercase = max_input_flow
__lowercase = 0
__lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__lowercase = max_input_flow
__lowercase = size - 1
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : Dict ):
'''simple docstring'''
__lowercase = algorithm(self )
class lowerCamelCase__ :
def __init__( self : Tuple , A_ : Optional[int] ):
'''simple docstring'''
__lowercase = flow_network
__lowercase = flow_network.verticesCount
__lowercase = flow_network.sourceIndex
__lowercase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__lowercase = flow_network.graph
__lowercase = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
if not self.executed:
self._algorithm()
__lowercase = True
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
class lowerCamelCase__ ( _a ):
def __init__( self : Union[str, Any] , A_ : int ):
'''simple docstring'''
super().__init__(A_ )
# use this to save your result
__lowercase = -1
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowerCamelCase__ ( _a ):
def __init__( self : List[str] , A_ : Tuple ):
'''simple docstring'''
super().__init__(A_ )
__lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
__lowercase = [0] * self.verticies_count
__lowercase = [0] * self.verticies_count
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__lowercase = 0
while i < len(A_ ):
__lowercase = vertices_list[i]
__lowercase = self.heights[vertex_index]
self.process_vertex(A_ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(A_ ) )
__lowercase = 0
else:
i += 1
__lowercase = sum(self.preflow[self.source_index] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Optional[int] ):
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(A_ , A_ )
self.relabel(A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , A_ : Union[str, Any] , A_ : List[str] ):
'''simple docstring'''
__lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Optional[Any] ):
'''simple docstring'''
__lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__lowercase = self.heights[to_index]
if min_height is not None:
__lowercase = min_height + 1
if __name__ == "__main__":
UpperCAmelCase__ =[0]
UpperCAmelCase__ =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase__ =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase__ =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase__ =flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 616 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCAmelCase__ =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCAmelCase__ =" \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCamelCase__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
__lowercase = self.diffusers_dir
shutil.copy(
os.path.join(A_ , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : List[str] , A_ : int , A_ : Optional[Any] , A_ : str=None ):
'''simple docstring'''
__lowercase = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
__lowercase = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
__lowercase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
__lowercase = black.format_str(A_ , mode=A_ )
__lowercase = os.path.join(self.diffusers_dir , """new_code.py""" )
with open(A_ , """w""" , newline="""\n""" ) as f:
f.write(A_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=A_ )
with open(A_ , """r""" ) as f:
self.assertTrue(f.read() , A_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , A_ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , A_ ) , )
# Copy consistency with a really long name
__lowercase = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("""Bert""" , A_ , A_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , A_ , overwrite_result=re.sub("""DDPM""" , """Test""" , A_ ) , )
| 616 | 1 |
"""TF-IDF building blocks: term frequency, document frequency, IDF and TF-IDF."""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing `term`, total documents); one document per line."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return the inverse document frequency, optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Return the TF-IDF score of a term for a document."""
    return round(tf * idf, 3)
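
# Example usage (a sketch; the toy corpus and the printed values are worked out
# by hand from the functions above, not taken from the original file):
#
#     corpus = "the cat sat\nthe dog sat\nthe cat ran"
#     tf = term_frequency("cat", "the cat sat")  # 1
#     df, n = document_frequency("cat", corpus)  # (2, 3)
#     idf = inverse_document_frequency(df, n)    # round(log10(3 / 2), 3) == 0.176
#     print(tf_idf(tf, idf))                     # 0.176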
| 707 |
"""Ideal-gas-law helpers (R = 0.0821 L·atm / (mol·K))."""


def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity (moles / volume) times the n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Pressure from PV = nRT, with volume in litres and temperature in kelvin."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Volume from PV = nRT, with pressure in atm and temperature in kelvin."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Temperature from PV = nRT."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
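
# Example usage (a sketch; the numbers are worked out by hand from the formulas
# above with R = 0.0821, not taken from the original file):
#
#     >>> molarity_to_normality(2, 3.1, 0.31)  # a 10 M solution with n-factor 2
#     20
#     >>> moles_to_pressure(volume=0.82, moles=3, temperature=300)
#     90
#     >>> moles_to_volume(pressure=0.82, moles=3, temperature=300)
#     90
#     >>> pressure_and_volume_to_temperature(pressure=0.82, moles=1, volume=2)
#     20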
| 390 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Character-level SentencePiece tokenizer used by the SpeechT5 checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into a list of SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending the eos token id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
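
# Example usage (a sketch; downloading "microsoft/speecht5_asr" needs network
# access plus the `sentencepiece` dependency, and the round-trip shown is the
# expected behaviour of a character-level tokenizer, not verified output):
#
#     tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#     ids = tokenizer("hello world").input_ids  # one piece per character + </s>
#     print(tokenizer.decode(ids, skip_special_tokens=True))  # "hello world"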
| 324 |
"""simple docstring"""
def __UpperCamelCase ( snake_case__ = 200 ):
A_ : Union[str, Any] = [1, 2, 5, 10, 20, 50, 100, 200]
A_ : int = [0] * (pence + 1)
A_ : Tuple = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(snake_case__ , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
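
# Example usage (a sketch; the small cases are easy to verify by hand and the
# 200p case matches the assertion above):
#
#     >>> solution(5)   # {5}, {2,2,1}, {2,1,1,1} and {1,1,1,1,1}
#     4
#     >>> solution(10)
#     11
#     >>> solution()
#     73682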
| 180 | 0 |
"""Convert ViLT checkpoints from the original dandelin/ViLT repository to HuggingFace format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into our ViLT structure."""
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
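
# Example invocation (a sketch; the script name and output directory are
# placeholders, while the URL is the script's own default MLM+ITM checkpoint):
#
#     python convert_vilt_checkpoint.py \
#         --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#         --pytorch_dump_folder_path ./vilt-b32-mlm-itm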
| 584 |
from __future__ import annotations

import unittest

import numpy as np

from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel


def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))


@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
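
# These are standard `transformers` test classes; from a repository checkout they
# would typically be run with pytest (a sketch; the test-file path depends on the
# repo layout, and RUN_SLOW=1 opts in to the checkpoint-downloading tests):
#
#     RUN_SLOW=1 python -m pytest tests/models/opt/test_modeling_tf_opt.py -k TFOPTModelTest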
| 584 | 1 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__UpperCAmelCase = logging.getLogger(__name__)
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if os.path.exists(__UpperCamelCase ):
if os.path.exists(os.path.join(__UpperCamelCase , """config.json""" ) ) and os.path.isfile(
os.path.join(__UpperCamelCase , """config.json""" ) ):
os.remove(os.path.join(__UpperCamelCase , """config.json""" ) )
if os.path.exists(os.path.join(__UpperCamelCase , """pytorch_model.bin""" ) ) and os.path.isfile(
os.path.join(__UpperCamelCase , """pytorch_model.bin""" ) ):
os.remove(os.path.join(__UpperCamelCase , """pytorch_model.bin""" ) )
else:
os.makedirs(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
if unlogit:
UpperCAmelCase__ : Tuple = torch.pow(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = p * torch.log(__UpperCamelCase )
UpperCAmelCase__ : int = 0
return -plogp.sum(dim=-1 )
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
logger.info("""lv, h >\t""" + """\t""".join(F"{x + 1}" for x in range(len(__UpperCamelCase ) ) ) )
for row in range(len(__UpperCamelCase ) ):
if tensor.dtype != torch.long:
logger.info(F"layer {row + 1}:\t" + """\t""".join(F"{x:.5f}" for x in tensor[row].cpu().data ) )
else:
logger.info(F"layer {row + 1}:\t" + """\t""".join(F"{x:d}" for x in tensor[row].cpu().data ) )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=False ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase__ : int = torch.zeros(__UpperCamelCase , __UpperCamelCase ).to(args.device )
UpperCAmelCase__ : Tuple = torch.zeros(__UpperCamelCase , __UpperCamelCase ).to(args.device )
if head_mask is None:
UpperCAmelCase__ : Tuple = torch.ones(__UpperCamelCase , __UpperCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__UpperCamelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Any = 0.0
UpperCAmelCase__ : List[str] = 0.0
for step, inputs in enumerate(tqdm(__UpperCamelCase , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase__ : Optional[int] = tuple(t.to(args.device ) for t in inputs )
((UpperCAmelCase__) , ) : List[Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase__ : Tuple = model(__UpperCamelCase , labels=__UpperCamelCase , head_mask=__UpperCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : str = entropy(attn.detach() , __UpperCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__UpperCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase__ : Optional[Any] = 2
UpperCAmelCase__ : Union[str, Any] = torch.pow(torch.pow(__UpperCamelCase , __UpperCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
UpperCAmelCase__ : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("""Attention entropies""" )
print_ad_tensor(__UpperCamelCase )
if compute_importance:
logger.info("""Head importance scores""" )
print_ad_tensor(__UpperCamelCase )
logger.info("""Head ranked by importance scores""" )
UpperCAmelCase__ : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase__ : Tuple = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase__ : Optional[int] = head_ranks.view_as(__UpperCamelCase )
print_ad_tensor(__UpperCamelCase )
return attn_entropy, head_importance, total_loss
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = compute_heads_importance(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , compute_entropy=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info("""Pruning: original score: %f, threshold: %f""" , __UpperCamelCase , original_score * args.masking_threshold )
UpperCAmelCase__ : List[Any] = torch.ones_like(__UpperCamelCase )
UpperCAmelCase__ : Any = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase__ : Any = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase__ : List[Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase__ : Optional[int] = float("""Inf""" )
UpperCAmelCase__ : List[Any] = head_importance.view(-1 ).sort()[1]
if len(__UpperCamelCase ) <= num_to_mask:
print("""BREAK BY num_to_mask""" )
break
# mask heads
UpperCAmelCase__ : List[str] = current_heads_to_mask[:num_to_mask]
logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase__ : int = new_head_mask.view(-1 )
UpperCAmelCase__ : Dict = 0.0
UpperCAmelCase__ : Optional[int] = new_head_mask.view_as(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(__UpperCamelCase )
# Compute metric and head importance again
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = compute_heads_importance(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , compute_entropy=__UpperCamelCase , head_mask=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = 1 / loss
logger.info(
"""Masking: current score: %f, remaining heads %d (%.1f percents)""" , __UpperCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("""Final head mask""" )
print_ad_tensor(__UpperCamelCase )
np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Dict = datetime.now()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = compute_heads_importance(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , compute_entropy=__UpperCamelCase , compute_importance=__UpperCamelCase , head_mask=__UpperCamelCase )
UpperCAmelCase__ : int = 1 / loss
UpperCAmelCase__ : int = datetime.now() - before_time
UpperCAmelCase__ : int = sum(p.numel() for p in model.parameters() )
UpperCAmelCase__ : Optional[int] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__UpperCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : List[str] = [
v,
]
assert sum(len(__UpperCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = sum(p.numel() for p in model.parameters() )
UpperCAmelCase__ : str = datetime.now()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = compute_heads_importance(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , compute_entropy=__UpperCamelCase , compute_importance=__UpperCamelCase , head_mask=__UpperCamelCase , actually_pruned=__UpperCamelCase , )
UpperCAmelCase__ : Optional[int] = 1 / loss
UpperCAmelCase__ : List[str] = datetime.now() - before_time
logger.info(
"""Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , __UpperCamelCase , __UpperCamelCase , pruned_num_params / original_num_params * 100 , )
logger.info("""Pruning: score with masking: %f score with pruning: %f""" , __UpperCamelCase , __UpperCamelCase )
logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100 )
save_model(__UpperCamelCase , args.output_dir )
def lowerCAmelCase ( ):
'''simple docstring'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--data_dir""" , default=None , type=str , required=True , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
    parser.add_argument(
        """--model_name_or_path""" , default=None , type=str , required=True , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
    parser.add_argument(
        """--output_dir""" , default=None , type=str , required=True , help="""The output directory where the model predictions and checkpoints will be written.""" , )
    # Other parameters
    parser.add_argument(
        """--config_name""" , default="""""" , type=str , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
    parser.add_argument(
        """--tokenizer_name""" , default="""""" , type=str , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
    parser.add_argument(
        """--cache_dir""" , default=None , type=str , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
    parser.add_argument(
        """--data_subset""" , type=int , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
    parser.add_argument(
        """--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
    parser.add_argument(
        """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
    parser.add_argument(
        """--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" )
    parser.add_argument(
        """--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , )
    parser.add_argument(
        """--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
    parser.add_argument(
        """--masking_threshold""" , default=0.9 , type=float , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
    parser.add_argument(
        """--masking_amount""" , default=0.1 , type=float , help="""Amount to heads to masking at each masking step.""" )
    parser.add_argument("""--metric_name""" , default="""acc""" , type=str , help="""Metric to use for head masking.""" )
    parser.add_argument(
        """--max_seq_length""" , default=128 , type=int , help=(
            """The maximum total input sequence length after WordPiece tokenization. \n"""
            """Sequences longer than this will be truncated, sequences shorter padded."""
        ) , )
    parser.add_argument("""--batch_size""" , default=1 , type=int , help="""Batch size.""" )
    parser.add_argument("""--seed""" , type=int , default=42 )
    parser.add_argument("""--local_rank""" , type=int , default=-1 , help="""local_rank for distributed training on gpus""" )
    parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
    parser.add_argument("""--server_ip""" , type=str , default="""""" , help="""Can be used for distant debugging.""" )
    parser.add_argument("""--server_port""" , type=str , default="""""" , help="""Can be used for distant debugging.""" )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("""Waiting for debugger attach""" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device("""cuda""" , args.local_rank )
        args.n_gpu = 1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , """run_args.bin""" ) )
    logger.info("""Training/evaluation parameters %s""" , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
# Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
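# Illustrative invocation (script and path names are placeholders, not taken from this
# file); it exercises the CLI that main() defines above:
#
#     python run_prune_gpt.py --data_dir ./token_ids.txt --model_name_or_path gpt2 \
#         --output_dir ./pruned_gpt2 --try_masking --masking_threshold 0.9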
| 65 |
"""simple docstring"""
import requests
def send_slack_message(message_body , slack_url ):
    '''simple docstring'''
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url , json={"""text""": message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            """Request to slack returned an error """
            F"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 65 | 1 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()

joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 287 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 1_2_8}
class BlenderbotTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
@mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation ):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = ' '.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
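# A minimal usage sketch (added for illustration; the checkpoint is the one the
# module's own URL maps point at):
#
#     tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     input_ids = tokenizer("Hello there!").input_ids  # ends with tokenizer.eos_token_id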
| 287 | 1 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode( enum.Enum ):
    """simple docstring"""

    ALL_CHECKS = '''all_checks'''
    BASIC_CHECKS = '''basic_checks'''
    NO_CHECKS = '''no_checks'''


class ChecksumVerificationException( Exception ):
    """simple docstring"""


class UnexpectedDownloadedFile( ChecksumVerificationException ):
    """simple docstring"""


class ExpectedMoreDownloadedFiles( ChecksumVerificationException ):
    """simple docstring"""


class NonMatchingChecksumError( ChecksumVerificationException ):
    """simple docstring"""
def verify_checksums(expected_checksums : Optional[dict] , recorded_checksums : dict , verification_name=None ):
    if expected_checksums is None:
        logger.info('Unable to verify checksums.' )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ' for ' + verification_name if verification_name is not None else ''
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            f'''Checksums didn\'t match{for_verification_name}:\n'''
            f'''{bad_urls}\n'''
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
    logger.info('All the checksums matched successfully' + for_verification_name )
class SplitsVerificationException( Exception ):
    """simple docstring"""


class UnexpectedSplits( SplitsVerificationException ):
    """simple docstring"""


class ExpectedMoreSplits( SplitsVerificationException ):
    """simple docstring"""


class NonMatchingSplitsSizesError( SplitsVerificationException ):
    """simple docstring"""
def verify_splits(expected_splits : Optional[dict] , recorded_splits : dict ):
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.' )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info('All the splits matched successfully.' )
def get_size_checksum_dict(path : str , record_checksum : bool = True ):
    if record_checksum:
        m = sha256()
        with open(path , 'rb' ) as f:
            for chunk in iter(lambda: f.read(1 << 2_0 ) , b'' ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset(dataset_size : Optional[int] ):
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
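# Small self-contained demo of get_size_checksum_dict (added for illustration, not part
# of the original module): write a temporary file and report its size and sha256 hash.
def _demo_size_checksum_dict():
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b'hello datasets')
        path = f.name
    # returns e.g. {'num_bytes': 14, 'checksum': '<64 hex chars>'}
    return get_size_checksum_dict(path)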
| 478 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor( SequenceFeatureExtractor ):
    '''simple docstring'''

    model_input_names = ['''audio_values''', '''audio_mask''']

    def __init__( self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', ).T

    def _np_extract_fbank_features( self, waveform ):
        '''simple docstring'''
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
        return log_spec
    def __call__( self, raw_speech, return_tensors = None, return_attention_mask = True, sampling_rate = None, resample = False, mask_audio = False, **kwargs, ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    F" with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float32 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list ):
            audio_features = [np.asarray(feature, dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors )
        return encoded_inputs
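# Usage sketch (illustrative only, not from the original file): run the extractor on
# one second of silence at its native sampling rate and inspect the padded output.
def _demo_tvlt_feature_extractor():
    extractor = TvltFeatureExtractor()
    speech = np.zeros(extractor.sampling_rate, dtype=np.float32)  # 1 s of mono audio
    batch = extractor(speech, sampling_rate=extractor.sampling_rate, return_tensors='np')
    return batch['audio_values'].shape  # (1, 1, max_time_len, feature_size)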
| 28 | 0 |
def decimal_isolate(number: float , digit_amount: int ) -> float:
    """
    Isolates the decimal part of a number. If digit_amount > 0, rounds the
    decimal part to that many places; otherwise returns the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 372 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x , bits=BITS ):
    """Converts an image tensor with values in [0, 1] into a bit tensor in {-1, 1}."""
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b c h w -> b c 1 h w' )
    bits_tensor = ((x & mask) != 0).float()
    bits_tensor = rearrange(bits_tensor , 'b c d h w -> b (c d) h w' )
    bits_tensor = bits_tensor * 2 - 1
    return bits_tensor


def bits_to_decimal(x , bits=BITS ):
    """Converts a bit tensor back into an image tensor with values in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b (c d) h w -> b c d h w' , d=8 )
    dec = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
    return (dec / 255).clamp(0.0 , 1.0 )
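# Quick round-trip sanity check for the two helpers above (a sketch added for
# illustration): after quantizing to 255 levels, bits -> decimal is lossless.
def _demo_bits_roundtrip():
    x = torch.rand(1, 3, 8, 8)
    recovered = bits_to_decimal(decimal_to_bits(x))
    return torch.allclose((x * 255).int().float() / 255, recovered)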
def ddim_bit_scheduler_step(self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = True , generator=None , return_dict = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    """simple docstring"""
    if self.num_inference_steps is None:
        raise ValueError(
            'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else 'cpu'
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def a_ ( self , _A , _A , _A , _A="epsilon" , _A=None , _A = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
snake_case__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
snake_case__ , snake_case__ = torch.split(_A , sample.shape[1] , dim=1 )
else:
snake_case__ = None
# 1. compute alphas, betas
snake_case__ = self.alphas_cumprod[t]
snake_case__ = self.alphas_cumprod[t - 1] if t > 0 else self.one
snake_case__ = 1 - alpha_prod_t
snake_case__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
snake_case__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
snake_case__ = model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
snake_case__ = self.bit_scale
if self.config.clip_sample:
snake_case__ = torch.clamp(_A , -scale , _A )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case__ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
snake_case__ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
snake_case__ = 0
if t > 0:
snake_case__ = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_A ).to(model_output.device )
snake_case__ = (self._get_variance(_A , predicted_variance=_A ) ** 0.5) * noise
snake_case__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_A , pred_original_sample=_A )
class BitDiffusion( DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , bit_scale: Optional[float] = 1.0 , ):
        super().__init__()
        self.bit_scale = bit_scale
        # bind the bit-aware step function onto this scheduler instance
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        ).__get__(scheduler )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , height: Optional[int] = 2_56 , width: Optional[int] = 2_56 , num_inference_steps: Optional[int] = 50 , generator: Optional[torch.Generator] = None , batch_size: Optional[int] = 1 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
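# Hedged usage sketch (the repo id is a placeholder; the original file shows no usage):
#
#     unet = UNet2DConditionModel.from_pretrained("some-org/bit-diffusion", subfolder="unet")  # hypothetical repo
#     scheduler = DDIMScheduler.from_pretrained("some-org/bit-diffusion", subfolder="scheduler")
#     pipe = BitDiffusion(unet, scheduler)
#     image = pipe(height=256, width=256, num_inference_steps=50).images[0]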
| 372 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor( ProcessorMixin ):
    '''simple docstring'''

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        """simple docstring"""
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.' )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids' )
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask' )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained( self , save_directory , **kwargs ):
        """simple docstring"""
        if os.path.isfile(save_directory ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
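# Minimal usage sketch (added for illustration; checkpoint and inputs are examples):
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
#     # `inputs` carries pixel_values, input_ids, plus qformer_input_ids/qformer_attention_mask.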
| 558 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    """simple docstring"""
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=int , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
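# Illustrative launch commands (an assumption, not part of the original example file);
# the script is normally run through the Accelerate CLI:
#
#     accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2
#     accelerate launch gradient_accumulation.py --mixed_precision fp16 --cpu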
| 15 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def test_download_only_pytorch( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            pipeline = FlaxDiffusionPipeline.from_pretrained(
                '''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=None ,cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname ,os.listdir(tmpdirname )[0] ,'''snapshots''' ) )]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def test_dummy_all_tpus( self ):
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            '''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=None )
        prompt = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed ,num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids ,params ,prng_seed ,num_inference_steps ,jit=True ).images
        assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.float32 ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
            assert np.abs(np.abs(images ,dtype=np.float32 ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(images_pil ) == num_samples
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''flax''' ,safety_checker=lowercase__ )
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(lowercase__ )
# shard inputs and rng
__lowercase = replicate(lowercase__ )
__lowercase = jax.random.split(lowercase__ ,lowercase__ )
__lowercase = shard(lowercase__ )
__lowercase = pipeline(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,jit=lowercase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(lowercase__ ,dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=lowercase__ )
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(lowercase__ )
# shard inputs and rng
__lowercase = replicate(lowercase__ )
__lowercase = jax.random.split(lowercase__ ,lowercase__ )
__lowercase = shard(lowercase__ )
__lowercase = pipeline(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,jit=lowercase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(lowercase__ ,dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa )
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(lowercase__ )
# shard inputs and rng
__lowercase = replicate(lowercase__ )
__lowercase = jax.random.split(lowercase__ ,lowercase__ )
__lowercase = shard(lowercase__ )
__lowercase = pipeline(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,jit=lowercase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(lowercase__ ,dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,set_alpha_to_one=lowercase__ ,steps_offset=1 ,)
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,scheduler=lowercase__ ,safety_checker=lowercase__ ,)
__lowercase = scheduler.create_state()
__lowercase = scheduler_state
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(lowercase__ )
# shard inputs and rng
__lowercase = replicate(lowercase__ )
__lowercase = jax.random.split(lowercase__ ,lowercase__ )
__lowercase = shard(lowercase__ )
__lowercase = pipeline(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,jit=lowercase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(lowercase__ ,dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = jax.random.split(jax.random.PRNGKey(0 ) ,lowercase__ )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=lowercase__ ,)
__lowercase = replicate(lowercase__ )
__lowercase = pipeline.prepare_inputs(lowercase__ )
__lowercase = shard(lowercase__ )
__lowercase = pipeline(lowercase__ ,lowercase__ ,lowercase__ ,jit=lowercase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=lowercase__ ,use_memory_efficient_attention=lowercase__ ,)
__lowercase = replicate(lowercase__ )
__lowercase = pipeline.prepare_inputs(lowercase__ )
__lowercase = shard(lowercase__ )
__lowercase = pipeline(lowercase__ ,lowercase__ ,lowercase__ ,jit=lowercase__ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 624 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open( *args ,**kwargs ):
            pass
def hashimage(image ):
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline( self ,model ,tokenizer ,processor ):
        depth_estimator = DepthEstimationPipeline(model=model ,image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self ,depth_estimator ,examples ):
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} ,outputs )
        import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' ,'''image''' ,split='''test''' )
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] ,outputs ,)
    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def test_small_model_tf( self ):
        pass
@slow
@require_torch
    def test_large_model_pt( self ):
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' ,model=model_id )
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        outputs['''depth'''] = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) ,2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) ,2.6_6_2 )
@require_torch
    def test_small_model_pt( self ):
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
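# Standalone usage of the pipeline under test (a sketch; it mirrors the checkpoint
# exercised in test_large_model_pt above):
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     out["depth"].save("depth.png")  # PIL image; out["predicted_depth"] is a tensor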
| 624 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''data2vec-text'''

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig( OnnxConfig ):
    '''simple docstring'''

    @property
    def inputs( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
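# Usage sketch (added for illustration, not part of the original file): build a config
# with defaults and inspect the dynamic axes the ONNX config declares.
def _demo_data2vec_text_config():
    config = Data2VecTextConfig()
    onnx_config = Data2VecTextOnnxConfig(config, task='default')
    return config.hidden_size, dict(onnx_config.inputs)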
| 5 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa ):
    """simple docstring"""
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
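# Hypothetical command line (paths are placeholders): convert a TF BigBird checkpoint
# that includes a TriviaQA head into a PyTorch dump.
#
#     python convert_bigbird_tf_checkpoint.py \
#         --tf_checkpoint_path ./bigbird/model.ckpt \
#         --big_bird_config_file ./bigbird/config.json \
#         --pytorch_dump_path ./bigbird-pytorch --is_trivia_qa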
| 589 | 0 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    # Total surface: the curved part (2 * pi * r^2) plus the flat circular base (pi * r^2).
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)), with s the semi-perimeter.
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
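# Worked example for Heron's formula above: a 5-12-13 right triangle has
# semi-perimeter s = 15, so area_triangle_three_sides(5, 12, 13) returns
# sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30.0, matching base * height / 2 = 30.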
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("""\nSurface Areas of various geometric shapes: \n""")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 700 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "The total number of n-best predictions to generate."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build the model inputs
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
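# Hypothetical usage sketch (the paths and model type are illustrative, not part
# of this module):
#
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
#     dataset = SquadDataset(args, tokenizer, mode="train")
#     batch = dataset[0]  # dict with input_ids / attention_mask / token_type_ids,
#                         # plus start/end positions in training mode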
| 612 | 0 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Standard logistic function, applied element-wise.
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    # Swish / SiLU activation: x * sigmoid(x), applied element-wise.
    return vector * sigmoid(vector)
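# Example values: sigmoid(np.array([0.0])) -> array([0.5]) and
# swish(np.array([0.0])) -> array([0.]), since swish(x) = x * sigmoid(x).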
if __name__ == "__main__":
import doctest
doctest.testmod()
| 583 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize, replacing any GenerationConfig entry by its dict representation
        # so the result stays JSON-serializable.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 583 | 1 |
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
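# Note: this client assumes a companion server is already listening on port 12312
# of the same host and streams a file back once it receives the greeting.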
if __name__ == "__main__":
main()
| 50 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of integers, and the constraint may only be
        # initialized from lists of integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One branch must not be a complete subset of another: [1, 2] would already
        # count as fulfilled while still in the middle of generating [1, 2, 3, 4].
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 50 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
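# Hypothetical usage sketch (the checkpoint id is illustrative):
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#     audio = output.audios[0]  # numpy array of shape (channels, samples)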
| 126 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 126 | 1 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 79 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
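# Worked example for trim_batch: with pad_token_id=0, the batch
# [[5, 6, 0], [7, 0, 0]] keeps every column that contains at least one
# non-pad token, returning [[5, 6], [7, 0]].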
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
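# Worked example: prediction "the cat sat" vs ground truth "cat sat down".
# normalize_answer drops the article, so the token lists are ["cat", "sat"]
# and ["cat", "sat", "down"]: precision = 2/2, recall = 2/3, F1 = 0.8.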
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 79 | 1 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only SD v1 is supported; download the reference inference config.
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted VAE to.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 388 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 388 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
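# Hypothetical export sketch: the dynamic axes above let an exported ONNX graph
# accept any batch size and sequence length, e.g.
#
#     onnx_config = BertOnnxConfig(BertConfig(), task="multiple-choice")
#     # onnx_config.inputs includes the extra "choice" axis only for multiple-choice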
| 40 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector):
    # Element-wise rectified linear unit: max(0, x).
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 40 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 47 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 408 | 0 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
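# Note: the loop above applies len(data) random transpositions, which shuffles but
# is not the textbook Fisher-Yates walk. A sketch of the classic variant for
# comparison:
#
#     for i in range(len(data) - 1, 0, -1):
#         j = random.randint(0, i)
#         data[i], data[j] = data[j], data[i]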
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 717 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
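# Each case above contains exactly one unconditional dependency, `os`: the
# `bar`/`baz` imports only appear inside try/except blocks, which get_imports
# treats as optional dependencies and skips, so every case parses to ["os"].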
@pytest.mark.parametrize("case" , snake_case_ )
def __UpperCAmelCase ( snake_case_ : Tuple , snake_case_ : Tuple ):
'''simple docstring'''
UpperCAmelCase: List[Any] = os.path.join(snake_case_ , "test_file.py" )
with open(snake_case_ , "w" ) as _tmp_file:
_tmp_file.write(snake_case_ )
UpperCAmelCase: Tuple = get_imports(snake_case_ )
assert parsed_imports == ["os"]
| 166 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout


| 90 |
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_00_00_00) -> int:
    """Return the numerator of the largest fraction below numerator/denominator with denominator <= limit."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        # Largest numerator with current_numerator / current_denominator <= numerator / denominator.
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # An exact multiple would reproduce numerator/denominator itself; step one below it.
            current_numerator -= 1
        # Cross-multiplied comparison: is the current fraction larger than the best so far?
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
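# Quick sanity check with a small search space: among fractions n/d < 3/7 with
# d <= 8, the largest is 2/5, so solution(limit=8) returns 2. The default limit
# of 1,000,000 reproduces the Project Euler #71 setting.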
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 316 | 0 |
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )

    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
return self._labels[label_id]
| 719 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
snake_case_ : Optional[int] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
# `snake_case_` above holds the pipeline's example docstring; re-export it under
# the conventional name so the decorator on __call__ can reference it.
EXAMPLE_DOC_STRING = snake_case_


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
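
# Quick check of the helper above (sketch): pixel sizes are mapped to latent
# sizes, rounding up to the next multiple of scale_factor**2 first.
# assert downscale_height_and_width(768, 768) == (96, 96)
# assert downscale_height_and_width(765, 768) == (96, 96)  # rounded up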
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Image generation pipeline for Kandinsky 2.2 with ControlNet conditioning."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""Offloads all models to CPU via accelerate, significantly reducing memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        r"""Offloads all models to CPU with accelerate hooks, moving each model
        to GPU only while its `forward` runs."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""Returns the device on which the pipeline's models will be executed,
        inferred from Accelerate's module hooks when CPU offload is enabled."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 166 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 354 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 354 | 1 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`,
    via dynamic programming over all smaller values."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
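
# Hedged quick checks, added for illustration (12 = 4 + 4 + 4 and 13 = 4 + 9):
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(13) == 2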
if __name__ == "__main__":
import doctest
doctest.testmod()
| 223 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort `collection` in place using recursion-only insertion sort."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Insert collection[index] into the already-sorted prefix collection[:index]."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 223 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices i < j with nums[i] + nums[j] == target in a sorted list,
    using the classic two-pointer scan; returns [] when no pair exists."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 481 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """Classic subset-sum DP: subset[i][j] is True when some subset of the
    first i elements of arr sums to j."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
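
# Hedged quick checks, added for illustration: {4, 5} sums to 9, while no
# subset of the list sums to 30.
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False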
if __name__ == "__main__":
import doctest
doctest.testmod()
| 423 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
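

# Hedged usage sketch (downloads the 418M checkpoint; pair with
# M2M100ForConditionalGeneration for actual translation):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
#   batch = tokenizer("Hello world", return_tensors="pt")
#   # generate with forced_bos_token_id=tokenizer.get_lang_id("fr") to target French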
| 357 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string, working around the SentencePiece quirk with a
        trailing comma after digits."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]

                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """An ALBERT sequence has the format `[CLS] X [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
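

# Hedged usage sketch (downloads the albert-base-v2 SentencePiece model; the
# exact pieces depend on that model, so the output below is illustrative only):
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   print(tokenizer.tokenize("Hello, world!"))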
| 357 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
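

# Hedged usage sketch (assumed behaviour based on the branch in __init__ above:
# the hybrid variant auto-builds a BiT backbone when none is supplied):
#
#   config = DPTConfig(is_hybrid=True)
#   assert config.backbone_config is not None
#   assert config.to_dict()["backbone_config"]["layer_type"] == "bottleneck"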
| 703 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio,
    splitting the last item if needed. Returns (max_value, fraction taken of each item)."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
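
# Hedged worked example, added for illustration: ratios are 6, 5 and 4, so the
# first two items are taken whole and two thirds of the last fills the
# remaining 20 units -> 60 + 100 + 120 * (20 / 30) = 240.
assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1, 1, 2 / 3])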
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
_lowerCAmelCase = datasets.load_iris()
_lowerCAmelCase = iris.data[:, :2]
_lowerCAmelCase = (iris.target != 0) * 1
_lowerCAmelCase = 0.1
_lowerCAmelCase = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return sigmoid_function(
np.dot(A_ , A_ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((_lowerCAmelCase) , (_lowerCAmelCase)) = (x[:, 0].min(), x[:, 0].max())
((_lowerCAmelCase) , (_lowerCAmelCase)) = (x[:, 1].min(), x[:, 1].max())
((_lowerCAmelCase) , (_lowerCAmelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_lowerCAmelCase = np.c_[xxa.ravel(), xxa.ravel()]
_lowerCAmelCase = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
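
# Hedged extra check (hypothetical; references names defined in the main block
# above): training accuracy with a 0.5 decision threshold.
#
#   preds = predict_prob(x) >= 0.5
#   print("train accuracy:", (preds == y).mean())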
| 565 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 221 | 0 |
snake_case = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
snake_case = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
snake_case = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 711 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 198 | 0 |
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    """A node in the search grid, ordered by its heuristic cost."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class snake_case__ :
'''simple docstring'''
def __init__( self , a__ , a__ ) -> List[str]:
'''simple docstring'''
__snake_case :Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , a__ )
__snake_case :Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , a__ )
__snake_case :str = [self.start]
__snake_case :list[Node] = []
__snake_case :int = False
def __lowercase ( self ) -> Path | None:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__snake_case :int = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__snake_case :Tuple = True
return self.retrace_path(a__ )
self.closed_nodes.append(a__ )
__snake_case :Dict = self.get_successors(a__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(a__ )
else:
# retrieve the best current path
__snake_case :Dict = self.open_nodes.pop(self.open_nodes.index(a__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(a__ )
else:
self.open_nodes.append(a__ )
if not self.reached:
return [self.start.pos]
return None
def __lowercase ( self , a__ ) -> list[Node]:
'''simple docstring'''
__snake_case :List[str] = []
for action in delta:
__snake_case :List[str] = parent.pos_x + action[1]
__snake_case :str = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(a__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
a__ , a__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , a__ , ) )
return successors
def __lowercase ( self , a__ ) -> Path:
'''simple docstring'''
__snake_case :Optional[int] = node
__snake_case :List[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__snake_case :Dict = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem)
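# Editor's note (addition): Node.f_cost above is the heuristic alone (g_cost is
# tracked but never added to f_cost), which is what makes this greedy
# best-first search rather than A*; the path it returns is found quickly but is
# not guaranteed to be the shortest one.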
| 455 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/rembert""": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
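# Hedged usage sketch (editor addition, not part of the original module). It
# assumes a SentencePiece model file on disk; the path below is a placeholder.
#
#     tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
#     pieces = tokenizer._tokenize("hello world")
#     ids = [tokenizer._convert_token_to_id(p) for p in pieces]
#     text = tokenizer.convert_tokens_to_string(pieces)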
| 455 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # NOTE (editor assumption): the fused qkv tensor is split into separate
            # query/key/value entries; the target key names below assume the HF AST
            # module layout (attention.attention.{query,key,value}).
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowerCamelCase__ : int = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowerCamelCase__ : Union[str, Any] = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowerCamelCase__ : Optional[Any] = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowerCamelCase__ : Dict = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowerCamelCase__ : Optional[int] = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowerCamelCase__ : Tuple = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowerCamelCase__ : Optional[int] = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowerCamelCase__ : str = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1e-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A_ : Optional[int] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
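# Hedged example invocation (editor addition); the script filename is an
# assumption, the flags are the ones defined above:
#
#     python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#         --model_name ast-finetuned-audioset-10-10-0.4593 \
#         --pytorch_dump_folder_path ./ast-dump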
| 696 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 1 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class a__ :
def __init__( self : Union[str, Any],_A : int,_A : int,_A : int,_A : int,_A : int,_A : Node | None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = pos_x
SCREAMING_SNAKE_CASE_ : str = pos_y
SCREAMING_SNAKE_CASE_ : Dict = (pos_y, pos_x)
SCREAMING_SNAKE_CASE_ : Optional[int] = goal_x
SCREAMING_SNAKE_CASE_ : str = goal_y
SCREAMING_SNAKE_CASE_ : List[str] = g_cost
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : str = self.calculate_heuristic()
SCREAMING_SNAKE_CASE_ : Any = self.g_cost + self.h_cost
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.pos_x - self.goal_x
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_A ) + abs(_A )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Dict,_A : Node ):
"""simple docstring"""
return self.f_cost < other.f_cost
class a__ :
def __init__( self : Optional[Any],_A : TPosition,_A : TPosition ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = Node(start[1],start[0],goal[1],goal[0],0,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Node(goal[1],goal[0],goal[1],goal[0],9_9999,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = [self.start]
SCREAMING_SNAKE_CASE_ : list[Node] = []
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
SCREAMING_SNAKE_CASE_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_A )
self.closed_nodes.append(_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_successors(_A )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_A )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE_ : str = self.open_nodes.pop(self.open_nodes.index(_A ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_A )
else:
self.open_nodes.append(_A )
return [self.start.pos]
def __UpperCamelCase ( self : Dict,_A : Node ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = []
for action in delta:
SCREAMING_SNAKE_CASE_ : int = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE_ : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_A,_A,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,_A,) )
return successors
def __UpperCamelCase ( self : Optional[Any],_A : Node | None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = node
SCREAMING_SNAKE_CASE_ : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE_ : int = current_node.parent
path.reverse()
return path
class a__ :
def __init__( self : Tuple,_A : TPosition,_A : TPosition ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = AStar(_A,_A )
SCREAMING_SNAKE_CASE_ : Tuple = AStar(_A,_A )
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.fwd_astar.open_nodes.pop(0 )
SCREAMING_SNAKE_CASE_ : int = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_A,_A )
self.fwd_astar.closed_nodes.append(_A )
self.bwd_astar.closed_nodes.append(_A )
SCREAMING_SNAKE_CASE_ : int = current_bwd_node
SCREAMING_SNAKE_CASE_ : Tuple = current_fwd_node
SCREAMING_SNAKE_CASE_ : str = {
self.fwd_astar: self.fwd_astar.get_successors(_A ),
self.bwd_astar: self.bwd_astar.get_successors(_A ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_A )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE_ : Optional[int] = astar.open_nodes.pop(
astar.open_nodes.index(_A ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_A )
else:
astar.open_nodes.append(_A )
return [self.fwd_astar.start.pos]
def __UpperCamelCase ( self : List[str],_A : Node,_A : Node ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.fwd_astar.retrace_path(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.bwd_astar.retrace_path(_A )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE_ : Optional[int] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 216 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
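# Hedged usage sketch (editor addition, not part of the original module); the
# checkpoint name comes from the pretrained map above:
#
#     tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Bonjour le monde")["input_ids"]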
| 216 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
A_ : int
A_ : jnp.dtype = jnp.floataa
def _UpperCAmelCase ( self ) -> List[Any]:
_a = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __UpperCAmelCase ) -> Union[str, Any]:
_a , _a , _a , _a = hidden_states.shape
_a = jax.image.resize(
__UpperCAmelCase , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
_a = self.conv(__UpperCAmelCase )
return hidden_states
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
A_ : int
A_ : jnp.dtype = jnp.floataa
def _UpperCAmelCase ( self ) -> int:
_a = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __UpperCAmelCase ) -> str:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_a = self.conv(__UpperCAmelCase )
return hidden_states
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
A_ : int
A_ : int = None
A_ : float = 0.0
A_ : bool = None
A_ : jnp.dtype = jnp.floataa
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.in_channels if self.out_channels is None else self.out_channels
_a = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_a = nn.Conv(
__UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_a = nn.Dense(__UpperCAmelCase , dtype=self.dtype )
_a = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_a = nn.Dropout(self.dropout_prob )
_a = nn.Conv(
__UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_a = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_a = None
if use_nin_shortcut:
_a = nn.Conv(
__UpperCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True ) -> Any:
_a = hidden_states
_a = self.norma(__UpperCAmelCase )
_a = nn.swish(__UpperCAmelCase )
_a = self.conva(__UpperCAmelCase )
_a = self.time_emb_proj(nn.swish(__UpperCAmelCase ) )
_a = jnp.expand_dims(jnp.expand_dims(__UpperCAmelCase , 1 ) , 1 )
_a = hidden_states + temb
_a = self.norma(__UpperCAmelCase )
_a = nn.swish(__UpperCAmelCase )
_a = self.dropout(__UpperCAmelCase , __UpperCAmelCase )
_a = self.conva(__UpperCAmelCase )
if self.conv_shortcut is not None:
_a = self.conv_shortcut(__UpperCAmelCase )
return hidden_states + residual | 285 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(
        self,
        raw_audio,
        padding=None,
        truncation=False,
        max_length=None,
        return_tensors=None,
        sampling_rate=None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 285 | 1 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
UpperCAmelCase__ = numpy.array([0, 0])
UpperCAmelCase__ = numpy.array([0.5, 0.866_0254])
UpperCAmelCase__ = numpy.array([1, 0])
UpperCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = initial_vectors
for _ in range(lowercase ):
_UpperCAmelCase = iteration_step(lowercase )
return vectors
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
_UpperCAmelCase = vectors[i + 1]
new_vectors.append(lowercase )
_UpperCAmelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 ,60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = numpy.radians(lowercase )
_UpperCAmelCase , _UpperCAmelCase = numpy.cos(lowercase ), numpy.sin(lowercase )
_UpperCAmelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_UpperCAmelCase , _UpperCAmelCase = zip(*lowercase )
plt.plot(lowercase ,lowercase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
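# Editor's note (addition): each iteration_step replaces every edge with 4
# shorter edges, so the initial 3 edges of the triangle become 3 * 4**5 = 3072
# edges (3073 points) after the 5 iterations used above.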
| 277 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = MraModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , ):
_UpperCAmelCase = True
_UpperCAmelCase = MraModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , )
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , )
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str ):
_UpperCAmelCase = MraForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : int ):
_UpperCAmelCase = MraForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MraForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MraForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = MraForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
def lowerCAmelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = MraModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@unittest.skip(reason="""MRA does not output attentions""" )
def lowerCAmelCase_ ( self : str ):
return
@require_torch
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
_UpperCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
_UpperCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = 5_0265
_UpperCAmelCase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
_UpperCAmelCase = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = 5_0265
_UpperCAmelCase = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
| 277 | 1 |
'''simple docstring'''
import requests
__a = "YOUR API KEY"
def __snake_case( _lowerCAmelCase , _lowerCAmelCase = giphy_api_key ) -> list:
snake_case__ : Optional[int] = """+""".join(query.split() )
snake_case__ : Dict = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
snake_case__ : int = requests.get(_lowerCAmelCase ).json()["""data"""]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 706 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=_lowerCAmelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=_lowerCAmelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=_lowerCAmelCase )
return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 301 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
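# Hedged usage sketch (editor addition): solving
#     1x + 2y = 5
#     2x + 1y = 4
# gives determinant = 1*1 - 2*2 = -3, determinant_x = 5*1 - 4*2 = -3,
# determinant_y = 1*4 - 2*5 = -6, hence x = 1.0 and y = 2.0.
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 2, 5], [2, 1, 4]))  # (1.0, 2.0)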
| 47 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"
    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
a__ = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
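# Hedged usage sketch (editor addition): building a default config and
# inspecting the ONNX export metadata defined above.
#
#     config = DebertaV2Config()
#     onnx_config = DebertaV2OnnxConfig(config)
#     print(onnx_config.inputs)              # dynamic axes per input name
#     print(onnx_config.default_onnx_opset)  # 12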
| 148 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
@slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 148 | 1 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1_408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1_536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1_792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2_304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2_560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
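

# Note on the permutes above: TF stores standard conv kernels as (H, W, in, out)
# and depthwise kernels as (H, W, in, multiplier), while PyTorch expects
# (out, in, H, W); hence permute(3, 2, 0, 1) and permute(2, 3, 0, 1).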
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub) | 45 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)

| 142 | 0 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def snake_case (A_ :int , A_ :int , A_ :int , A_ :int , A_ :int , A_ :int ):
'''simple docstring'''
if (ksize % 2) == 0:
a : Optional[int] = ksize + 1
a : List[Any] = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(A_ ):
for x in range(A_ ):
# distance from center
a : Tuple = x - ksize // 2
a : int = y - ksize // 2
# degree to radiant
a : List[str] = theta / 1_8_0 * np.pi
a : Any = np.cos(_theta )
a : List[Any] = np.sin(_theta )
# get kernel x
a : int = cos_theta * px + sin_theta * py
# get kernel y
a : Tuple = -sin_theta * px + cos_theta * py
# fill kernel
a : Optional[int] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
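

# Quick sanity check (comments only): with theta=0 and gamma=0 the kernel is
# constant along y and symmetric in x, e.g.
#   gabor_filter_kernel(3, 8, 0, 10, 0, 0)
# yields rows of approximately [0.8027, 1.0, 0.8027].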
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 118 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
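

# Worked example (comments only): an outer square of width 3 with a 1x1 hole
# uses 3*3 - 1*1 = 8 tiles, so count[8] gains one lamina arrangement.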
if __name__ == "__main__":
print(f'''{solution() = }''')
| 118 | 1 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 262 | '''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 262 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __A(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class __B(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 318 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol = None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    compression = None  # compression type in fsspec. ex: "gzip"
    extension = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
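

# Usage sketch (comments only; assumes a local gzip file and the subclass
# names defined below):
#   fs = GzipFileSystem(fo="archive.txt.gz")
#   fs.cat("archive.txt")  # -> the decompressed bytes of the single member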
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a gzip file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a zstd file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 318 | 1 |
'''simple docstring'''
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
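

# Example (comments only):
#   circle_sort([0, 5, 3, 2, 2])  ->  [0, 2, 2, 3, 5]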
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))

| 523 |

'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
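
    # Note: the panorama pipeline widens the latent canvas; the slow tests below
    # generate 512x2048 images, i.e. 64x256 latents at the VAE's 8x downscale.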
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9

| 523 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize | 298 |
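# Illustrative usage of the config class above; the directory name and the override
# values are invented examples, not part of the original file:
if __name__ == "__main__":
    demo_config = VisualBertConfig(visual_embedding_dim=1024, num_hidden_layers=6)
    demo_config.save_pretrained("./visualbert-demo")  # serializes to ./visualbert-demo/config.json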
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 298 | 1 |
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 385 |
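# Illustrative sanity checks for is_palindrome above, mirroring the doctest style
# used elsewhere in this collection:
assert is_palindrome(121)
assert not is_palindrome(123)
assert not is_palindrome(-121)  # negative numbers are rejected by the early return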
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted)) | 45 | 0 |
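# Illustrative check for stooge_sort above. Stooge sort runs in
# O(n**(log 3 / log 1.5)), roughly O(n**2.71), so it is a teaching example
# rather than a practical sort:
assert stooge_sort([18, 2, 9, -7, 4]) == [-7, 2, 4, 9, 18]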
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 375 |
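# Illustrative checks for is_balanced above:
assert is_balanced("{[()]}")
assert not is_balanced("{[(])}")  # interleaved pairs fail the open_to_closed lookup
assert is_balanced("")  # an empty sequence leaves the stack empty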
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 375 | 1 |
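# A minimal sketch of a concrete command built on the ABC above. The "echo" command is
# invented for illustration; in the real CLI, `parser` is the sub-parsers action
# returned by ArgumentParser.add_subparsers().
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser("echo", help="print the given text")
        echo_parser.add_argument("text", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)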
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", query: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", query)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 240 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
"""processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""VisionTextDualEncoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""FlaxVisionTextDualEncoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""TFVisionTextDualEncoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 240 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_dataset():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 709 |
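# Illustrative stand-alone use of the helpers exercised above. It assumes the same
# joblib-spark setup the tests gate on; `add_one` is the picklable helper defined above.
# with parallel_backend("spark"):
#     result = map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
#     # result == {"a": [2, 3], "b": [4, 5]}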
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        frame_durations = [frame_duration] * len(paths)
        if extend_frames:
            frame_durations[0] = 1.5
            frame_durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=frame_durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 373 | 0 |
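# An end-to-end usage sketch for the class above. The prompts and paths are invented
# examples, and VQGAN weights must be resolvable via loaders.load_vqgan:
# editor = VQGAN_CLIP(iterations=20, lr=0.02)
# for _frame in editor.generate(
#     pos_prompts="a smiling face | bright lighting:0.5",
#     neg_prompts="blurry:1.0",
#     image_path="./face.png",
#     show_intermediate=False,
#     save_intermediate=True,
# ):
#     pass  # each yielded value is a PIL image while return_val == "image"
# editor.make_animation(output_path="./edit.gif")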
"""simple docstring"""
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 650 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : str = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : int = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Any = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
class __a ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = ["flax"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
@classmethod
def snake_case_ ( cls , *a__ , **a__ ):
requires_backends(cls , ['flax'] )
| 650 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 714 |
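# Illustrative instantiation of the config above; the override values are arbitrary:
# config = RealmConfig(num_candidates=4, reader_beam_size=3)
# assert config.model_type == "realm"
# assert config.retriever_proj_size == 128  # default kept from the signature above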
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 309 | 0 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 127 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowerCamelCase_ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowerCamelCase_ = [0, 25, 50]
lowerCamelCase_ = [25, 50, 75]
lowerCamelCase_ = fuzz.membership.trimf(X, abca)
lowerCamelCase_ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowerCamelCase_ = np.ones(75)
lowerCamelCase_ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
lowerCamelCase_ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowerCamelCase_ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
lowerCamelCase_ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowerCamelCase_ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
lowerCamelCase_ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowerCamelCase_ = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
lowerCamelCase_ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
lowerCamelCase_ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 151 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", query: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", query)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 715 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 197 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
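# Illustrative checks for gaussian above: the standard normal density peaks at
# 1 / sqrt(2 * pi), roughly 0.3989, when x == mu.
assert abs(gaussian(0) - 1 / sqrt(2 * pi)) < 1e-12
assert gaussian(2, mu=2, sigma=3) == gaussian(0, mu=0, sigma=3)  # shift invariance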
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 366 | 1 |
def solution(power: int = 1000) -> int:
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 714 |
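# Illustrative check for solution above: 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.
assert solution(15) == 26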
'''simple docstring'''
import functools
def min_distance_bottom_up(worda: str, wordb: str) -> int:
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index overflows - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index overflows - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 | 0 |
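# Illustrative checks for min_distance_bottom_up above (classic Levenshtein cases):
assert min_distance_bottom_up("intention", "execution") == 5
assert min_distance_bottom_up("horse", "ros") == 3
assert min_distance_bottom_up("", "abc") == 3  # pure insertions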
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(f'''Generating {path}''' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / '''allenai''' / model_name
    write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 686 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_( state_dict ) -> None:
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys( state_dict , expert_idx=None ) -> dict:
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.' , '.ffn.fc2.' )
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.' , '.ffn.fc1.' )
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.' , '.cross_attention.' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm' , 'ff_layer_norm' )
        new_dict[key] = state_dict[old_key]
    return new_dict
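
# Sanity-check sketch (added; not part of the original conversion script): a
# fabricated fairseq-style expert key should land under `ffn.experts.expert_0`.
assert "layers.0.ffn.experts.expert_0.fc1.weight" in rename_fairseq_keys(
    {"layers.0.moe_layer.experts.0.fc1.weight": None}, expert_idx=0
)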
def shard_on_the_fly( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name: str = WEIGHTS_NAME ):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + f'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['model']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('.bin' , f'''-{len(sharded_state_dicts)+1:05d}-of-???.bin''' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('.bin' , f'''-{len(sharded_state_dicts)+1:05d}-of-???.bin''' ) )
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , 'w' , encoding='utf-8' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    metadata , index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
    model.save_pretrained(args.pytorch_dump_folder_path)
| 686 | 1 |
'''simple docstring'''
def equated_monthly_installments( principal: float , rate_per_annum: float , years_to_repay: int ) -> float:
    """simple docstring"""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0" )
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception("Years to repay must be an integer > 0" )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
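    # Added example (figures assumed for illustration, not from the original):
    # a 100_000 loan at 10% annual interest over 2 years -> EMI of about 4614.49.
    print(f"EMI: {equated_monthly_installments(100_000, 0.10, 2):.2f}")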
| 703 |
def speed_of_sound_in_a_fluid( density: float , bulk_modulus: float ) -> float:
"""simple docstring"""
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
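    # Added sanity check (approximate physical constants assumed): water at
    # ~1000 kg/m^3 with a bulk modulus of ~2.15e9 Pa gives roughly 1466 m/s.
    print(f"{speed_of_sound_in_a_fluid(1_000, 2.15e9):.0f} m/s")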
| 437 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace ):
    '''simple docstring'''
    return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_SCREAMING_SNAKE_CASE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser ) -> None:
        train_parser = parser.add_parser(
            "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
        train_parser.add_argument("--model_type" , type=str , required=True , help="Model's type." )
        train_parser.add_argument(
            "--tf_checkpoint" , type=str , required=True , help="TensorFlow checkpoint path or folder." )
        train_parser.add_argument(
            "--pytorch_dump_output" , type=str , required=True , help="Path to the PyTorch saved model output." )
        train_parser.add_argument("--config" , type=str , default="" , help="Configuration file path or folder." )
        train_parser.add_argument(
            "--finetuning_task_name" , type=str , default=None , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ) -> None:
        self._logger = logging.get_logger("transformers-cli/converting" )
        self._logger.info(f'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ) -> None:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(_SCREAMING_SNAKE_CASE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_SCREAMING_SNAKE_CASE )
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT , self._config , self._pytorch_dump_output , TF_DATASET_FILE )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_SCREAMING_SNAKE_CASE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_SCREAMING_SNAKE_CASE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 18 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_DOCS = '''docs/source/en'''
REPO_PATH = '''.'''
def _find_text_in_file( filename, start_prompt, end_prompt ):
    with open(filename, "r", encoding="utf-8", newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_snake_case = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_re_flax_models = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split( identifier ):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier )
    return [m.group(0 ) for m in matches]
def _center_text( text, width ):
    text_length = 2 if text == "✅" or text == "❌" else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules() -> str:
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
    model_name_to_prefix = {name: config.replace("Config", "" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
# Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith("Tokenizer" ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast" ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name )[:-1] )
# Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w ) for c, w in zip(columns, widths )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
__UpperCAmelCase : Any = {True: "✅", False: "❌"}
for name in model_names:
__UpperCAmelCase : str = model_name_to_prefix[name]
__UpperCAmelCase : List[Any] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(snake_case__, snake_case__ ) for l, w in zip(snake_case__, snake_case__ )] ) + "|\n"
return table
def check_model_table( overwrite=False ):
    current_table , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md" ), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md" ), "w", encoding="utf-8", newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 382 | 0 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution :
    def __init__( self : Optional[Any] ) -> None:
        """simple docstring"""
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution( self : Union[str, Any] ) -> list[float]:
        """simple docstring"""
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
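    # Added demo (not in the original file): circularly convolving the default
    # signals [2, 1, 2, -1] and [1, 2, 3, 4] should produce [10, 10, 6, 14].
    assert CircularConvolution().circular_convolution() == [10, 10, 6, 14]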
| 707 |
def is_palindrome( head ):
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
return True
def is_palindrome_stack( head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
return True
def is_palindrome_dict( head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
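
# Added demo (not part of the original file): `Node` is a minimal singly
# linked node assumed here so the three strategies can be exercised.
class Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt

if __name__ == "__main__":
    for check in (is_palindrome, is_palindrome_stack, is_palindrome_dict):
        assert check(Node(1, Node(2, Node(1))))
        assert not check(Node(1, Node(2, Node(3))))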
| 32 | 0 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution( num_picked: int = 20 ) -> str:
    '''simple docstring'''
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
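    # Added cross-check (not in the original): drawing all 70 balls leaves
    # every colour represented, so the expected number of colours is exactly 7.
    assert solution(NUM_BALLS) == "7.000000000"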
| 79 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    # Student-side keys below follow DistilBERT's parameter naming (reconstructed).
    compressed_sd = {}
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[F"""distilbert.embeddings.{w}.weight"""] = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
    for w in ["weight", "bias"]:
        compressed_sd[F"""distilbert.embeddings.LayerNorm.{w}"""] = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
            ]
        std_idx += 1
    compressed_sd['vocab_projector.weight'] = state_dict['cls.predictions.decoder.weight']
    compressed_sd['vocab_projector.bias'] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[F"""vocab_transform.{w}"""] = state_dict[F"""cls.predictions.transform.dense.{w}"""]
            compressed_sd[F"""vocab_layer_norm.{w}"""] = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
    torch.save(compressed_sd, args.dump_checkpoint)
| 293 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def get_dummy_components( self ) -> str:
'''simple docstring'''
return self._get_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ) -> Optional[int]:
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ) -> Dict:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components( self ) -> List[str]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ) -> Optional[int]:
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ) -> Dict:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ) -> int:
'''simple docstring'''
self._test_save_load_local()
    def test_inference_batch_single_identical( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
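
# Hedged usage note (added): with the lazy module installed into sys.modules,
# e.g. `from transformers.models.gpt_neo import GPTNeoConfig` resolves from
# _import_structure without importing torch until a model class is touched.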
| 109 | 0 |
'''simple docstring'''
import os
def lowercase__( _UpperCamelCase : str = "input.txt" )-> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(_UpperCamelCase ) , _UpperCamelCase ) ) as input_file:
_UpperCamelCase = [
[int(_UpperCamelCase ) for element in line.split("," )]
for line in input_file.readlines()
]
_UpperCamelCase = len(_UpperCamelCase )
_UpperCamelCase = len(matrix[0] )
_UpperCamelCase = [[-1 for _ in range(_UpperCamelCase )] for _ in range(_UpperCamelCase )]
for i in range(_UpperCamelCase ):
_UpperCamelCase = matrix[i][0]
for j in range(1 , _UpperCamelCase ):
for i in range(_UpperCamelCase ):
_UpperCamelCase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _UpperCamelCase ):
_UpperCamelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_UpperCamelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 138 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''
class FlaxKarrasDiffusionSchedulers( Enum ):
    '''simple docstring'''
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput( BaseOutput ):
    '''simple docstring'''
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin :
    '''simple docstring'''
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["""dtype"""]
    _compatibles = []
    has_compatibles = True
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ):
        config , kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
        scheduler , unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
        if hasattr(scheduler , "create_state" ) and getattr(scheduler , "has_state" , False ):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained( self , save_directory , push_to_hub = False , **kwargs ):
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
    @property
    def compatibles( self ):
        return self._get_compatibles()
@classmethod
    def _get_compatibles( cls ):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split("." )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
return compatible_classes
def broadcast_to_shape_from_left( x: jnp.ndarray , shape: Tuple[int] ) -> jnp.ndarray:
    """simple docstring"""
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar( num_diffusion_timesteps: int , max_beta: float = 0.999 , dtype: Any = jnp.float32 ) -> jnp.ndarray:
    """simple docstring"""
    def alpha_bar(time_step: Any ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
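
# Added sketch (not part of the original module): a short cosine schedule is
# strictly positive and never exceeds 1.
_demo_betas = betas_for_alpha_bar(10)
assert 0 < float(_demo_betas.min()) <= float(_demo_betas.max()) <= 1.0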
@flax.struct.dataclass
class CommonSchedulerState :
    '''simple docstring'''
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
    @classmethod
    def create( cls , scheduler ):
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                F"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod( state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """simple docstring"""
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common( state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """simple docstring"""
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common( state: CommonSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """simple docstring"""
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
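
if __name__ == "__main__":
    # Added smoke test (fabricated state, not from diffusers): with
    # alphas_cumprod = [0.25], noising x0 = 1 with eps = 0 at t = 0 gives
    # sqrt(0.25) * 1 + sqrt(0.75) * 0 = 0.5.
    _state = CommonSchedulerState(
        alphas=jnp.array([0.99] ) , betas=jnp.array([0.01] ) , alphas_cumprod=jnp.array([0.25] ) )
    _noisy = add_noise_common(_state , jnp.ones((1, 2) ) , jnp.zeros((1, 2) ) , jnp.array([0] ) )
    assert bool(jnp.allclose(_noisy , 0.5 ) )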
| 138 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline( DiffusionPipeline ):
    """simple docstring"""
    unet: UNet2DModel
    scheduler: KarrasVeScheduler
    def __init__( self , unet: UNet2DModel , scheduler: KarrasVeScheduler ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 50 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output["derivative"] , )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
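
# Hedged usage sketch (added; the model id is an assumption - Google's NCSNpp
# checkpoints on the Hub are commonly used with this pipeline):
# pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]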
| 720 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ (unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline( self , model , tokenizer , processor ) -> Any:
        '''simple docstring'''
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=["polics", "health"] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self , classifier , examples ) -> Optional[Any]:
        '''simple docstring'''
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        # No kwarg
        outputs = classifier("Who are you voting for in 2020?" , ["politics"] )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
        self.assertEqual(
            outputs , {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
        self.assertEqual(
            outputs , {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        outputs = classifier(
            "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"] , ["positive", "negative"] )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier("" , candidate_labels="politics" )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels="politics" )
        with self.assertRaises(ValueError ):
            classifier("Who are you voting for in 2020?" , candidate_labels="" )
        with self.assertRaises(TypeError ):
            classifier("Who are you voting for in 2020?" , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
        with self.assertRaises(AttributeError ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=None , )
        self.run_entailment_id(classifier )
    def run_entailment_id( self , zero_shot_classifier: Pipeline ) -> Union[str, Any]:
        '''simple docstring'''
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        zero_shot_classifier.model.config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
    def test_truncation( self ) -> Any:
'''simple docstring'''
        zero_shot_classifier = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
    def test_small_model_pt( self ) -> Tuple:
        '''simple docstring'''
        zero_shot_classifier = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
    def test_small_model_tf( self ) -> Dict:
        '''simple docstring'''
        zero_shot_classifier = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
    def test_large_model_pt( self ) -> int:
        '''simple docstring'''
        zero_shot_classifier = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
    def test_large_model_tf( self ) -> str:
        '''simple docstring'''
        zero_shot_classifier = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 656 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class BloomTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = None
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                """ pretokenized inputs.""" )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                """ pretokenized inputs.""" )
        return super()._encode_plus(*args , **kwargs )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase =self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _a ( self : int , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]:
__UpperCAmelCase =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) + [self.eos_token_id] )
if len(__SCREAMING_SNAKE_CASE ) > self.model_max_length:
__UpperCAmelCase =input_ids[-self.model_max_length :]
return input_ids
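# A minimal usage sketch (not part of the original module). It assumes network access
# to the Hugging Face Hub and that the "bigscience/bloom-560m" checkpoint is available:
#
#     tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     enc = tok("Hello world", return_tensors="pt")
#     print(enc["input_ids"].shape)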
| 68 |
def base16_encode(data: bytes) -> str:
    """
    Encode raw bytes as an uppercase base16 (hex) string.

    >>> base16_encode(b"Hello World!")
    '48656C6C6F20576F726C6421'
    """
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """
    Decode an uppercase base16 (hex) string back into raw bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:
Data does not have an even number of hex digits.'''
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            '''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.'''
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
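# Worked example, easy to verify by hand: "A" is byte 0x41, so base16_encode(b"A")
# returns "41" and base16_decode("41") returns b"A". Decoding "4" raises ValueError
# because the input has an odd number of hex digits, and "4a" raises as well because
# the alphabet must be uppercase per RFC 3548 section 6.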
| 381 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    r"""Wraps a BLIP image processor and an auto-tokenizer into a single BLIP-2 processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
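# A minimal usage sketch (not part of the original module); the checkpoint name and
# image file are assumptions for illustration:
#
#     from PIL import Image
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#     # `inputs` now carries both `pixel_values` and the tokenized text fields.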
| 714 |
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
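# Invocation sketch, assuming this command is wired into the `transformers-cli`
# entry point like the other commands in this package:
#
#     transformers-cli download --cache-dir /tmp/models --force bert-base-uncased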
| 138 | 0 |
'''Biquad (second-order IIR) filter design, following the standard audio EQ cookbook formulas.'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak (bell) biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 69 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
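# To run just this module in isolation (the path assumes a standard diffusers checkout):
#
#     python -m pytest tests/models/test_models_vq.py -q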
| 684 | 0 |
'''Scrape post metadata from a subreddit's public JSON feed.'''
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch `limit` posts from a subreddit, optionally projecting only `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"User-agent": "A random string"}
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
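# Example of the validation path: requesting a field that is not in `valid_terms`
# raises immediately, before any network request is made.
#
#     get_subreddit_data("learnpython", wanted_data=["not_a_field"])  # -> ValueError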
| 568 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
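# Standalone usage sketch for the scheduler under test (the tensor shape is an
# arbitrary assumption for illustration):
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         residual = torch.zeros_like(sample)  # stand-in for a model prediction
#         sample = scheduler.step(residual, t, sample).prev_sample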
| 568 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
def __init__( self , lowercase_ , lowercase_=2 , lowercase_=3 , lowercase_=4 , lowercase_=2 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=36 , lowercase_=3 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=6 , lowercase_=6 , lowercase_=3 , lowercase_=4 , lowercase_=None , lowercase_=1000 , ) -> str:
a__ =parent
a__ =batch_size
a__ =num_channels
a__ =image_size
a__ =patch_size
a__ =text_seq_length
a__ =is_training
a__ =use_input_mask
a__ =use_token_type_ids
a__ =use_labels
a__ =vocab_size
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =type_sequence_label_size
a__ =initializer_range
a__ =coordinate_size
a__ =shape_size
a__ =num_labels
a__ =num_choices
a__ =scope
a__ =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
a__ =text_seq_length
a__ =(image_size // patch_size) ** 2 + 1
a__ =self.text_seq_length + self.image_seq_length
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
a__ =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a__ =bbox[i, j, 3]
a__ =bbox[i, j, 1]
a__ =t
if bbox[i, j, 2] < bbox[i, j, 0]:
a__ =bbox[i, j, 2]
a__ =bbox[i, j, 0]
a__ =t
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =None
if self.use_input_mask:
a__ =random_attention_mask([self.batch_size, self.text_seq_length])
a__ =None
if self.use_token_type_ids:
a__ =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
a__ =None
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
a__ =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str:
a__ =LayoutLMvaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
# text + image
a__ =model(lowercase_ , pixel_values=lowercase_)
a__ =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_)
a__ =model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , token_type_ids=lowercase_)
a__ =model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
a__ =model(lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
a__ =model(pixel_values=lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[str]:
a__ =self.num_labels
a__ =LayoutLMvaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str:
a__ =self.num_labels
a__ =LayoutLMvaForTokenClassification(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str:
a__ =LayoutLMvaForQuestionAnswering(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=False) -> Tuple:
a__ =copy.deepcopy(lowercase_)
if model_class in get_values(lowercase_):
a__ ={
k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
if isinstance(lowercase_ , torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowercase_):
a__ =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in get_values(lowercase_):
a__ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
a__ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in [
*get_values(lowercase_),
]:
a__ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in [
*get_values(lowercase_),
]:
a__ =torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowercase_ , )
return inputs_dict
def __UpperCamelCase ( self) -> str:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self) -> List[str]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ =type
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_)
@slow
def __UpperCamelCase ( self) -> Optional[Any]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ =LayoutLMvaModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
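# Note on the bbox fix-up in the tester above: LayoutLM-style models expect boxes as
# (x0, y0, x1, y1) with x0 <= x1 and y0 <= y1, which is why randomly drawn coordinates
# are swapped when a box comes out inverted. A minimal sketch of the invariant
# (values are arbitrary):
#
#     box = [48, 84, 73, 128]  # x0, y0, x1, y1 in 0-1000 normalized space
#     assert box[0] <= box[2] and box[1] <= box[3]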
| 20 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """Builds small Swinv2 configs and dummy inputs for the tests below."""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =embed_dim
__UpperCAmelCase =depths
__UpperCAmelCase =num_heads
__UpperCAmelCase =window_size
__UpperCAmelCase =mlp_ratio
__UpperCAmelCase =qkv_bias
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =hidden_act
__UpperCAmelCase =use_absolute_embeddings
__UpperCAmelCase =patch_norm
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =initializer_range
__UpperCAmelCase =is_training
__UpperCAmelCase =scope
__UpperCAmelCase =use_labels
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =encoder_stride
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ) -> Optional[Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
__UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
__UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase =1
__UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__UpperCAmelCase =self.type_sequence_label_size
__UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
def _a ( self : List[Any] ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : str ) -> str:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def _a ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def _a ( self : Optional[Any] ) -> int:
pass
def _a ( self : Tuple ) -> int:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _a ( self : str ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
for model_class in self.all_model_classes:
__UpperCAmelCase =True
__UpperCAmelCase =False
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
__UpperCAmelCase =len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase =True
__UpperCAmelCase =config.window_size**2
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__UpperCAmelCase =True
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
__UpperCAmelCase =self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase =2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.hidden_states
__UpperCAmelCase =getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase =outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape
__UpperCAmelCase =(
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self : str ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =3
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Dict:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _a ( self : int ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
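# The attention-shape assertions in the test class above follow from Swin's windowed
# attention: each window attends over window_size**2 patch tokens, so every attention
# map has shape (num_heads, window_size**2, window_size**2). With the tester's default
# window_size=2, that is 4 tokens per window and 4x4 attention maps.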
| 68 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """
    Returns all primes up to and including `num`.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
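# Worked example: prime_sieve(10) uses end = int(sqrt(10)) = 3, so the loop appends 2
# (marking 4, 6, 8, 10) and 3 (marking 9), and the tail loop then collects 5 and 7,
# returning [2, 3, 5, 7].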
if __name__ == "__main__":
    print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 700 |
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
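# Sketch of what the helper returns (module paths are those of the supported
# architectures): for GPT-2 it is the first block's `c_fc` projection, otherwise the
# first block's `dense_4h_to_h` down-projection (as in BLOOM) - a convenient Linear
# layer to inspect for quantized weight classes in the tests below.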
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
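    # A minimal usage sketch (not part of the original tests): wrap a plain Linear so
    # the frozen base output and the zero-initialized adapter output are summed.
    #
    #     base = nn.Linear(16, 16)
    #     wrapped = LoRALayer(base, rank=4)
    #     out = wrapped(torch.randn(2, 16))  # shape (2, 16); equals base(x) at init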
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fpaa
        del self.model_abit

        gc.collect()
        torch.cuda.empty_cache()
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = self.model_abit.config
self.assertTrue(hasattr(__a , "quantization_config" ) )
UpperCamelCase = config.to_dict()
UpperCamelCase = config.to_diff_dict()
UpperCamelCase = config.to_json_string()
def snake_case_ (self ) -> Optional[int]:
from bitsandbytes.nn import Paramsabit
UpperCamelCase = self.model_fpaa.get_memory_footprint()
UpperCamelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
UpperCamelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def snake_case_ (self ) -> Dict:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def snake_case_ (self ) -> Tuple:
UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
UpperCamelCase = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = BitsAndBytesConfig()
UpperCamelCase = True
UpperCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , device_map="auto" )
UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
UpperCamelCase = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def snake_case_ (self ) -> Dict:
with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__a )
def snake_case_ (self ) -> int:
UpperCamelCase = BitsAndBytesConfig()
with self.assertRaises(__a ):
UpperCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , load_in_abit=__a , device_map="auto" , bnb_abit_quant_type="nf4" , )
def snake_case_ (self ) -> Any:
with self.assertRaises(__a ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(__a ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
UpperCamelCase = self.model_fpaa.to(torch.floataa )
UpperCamelCase = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
UpperCamelCase = self.model_fpaa.to("cpu" )
# Check this does not throw an error
UpperCamelCase = self.model_fpaa.half()
# Check this does not throw an error
UpperCamelCase = self.model_fpaa.float()
def snake_case_ (self ) -> str:
UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=__a , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCamelCase ( unittest.TestCase ):
@classmethod
def snake_case_ (cls ) -> Tuple:
UpperCamelCase = "t5-small"
UpperCamelCase = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
UpperCamelCase = AutoTokenizer.from_pretrained(cls.model_name )
UpperCamelCase = "Translate in German: Hello, my dog is cute"
def snake_case_ (self ) -> Any:
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
UpperCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules
UpperCamelCase = None
# test with `t5-small`
UpperCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
UpperCamelCase = model.generate(**__a )
# test with `flan-t5-small`
UpperCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map="auto" )
UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
UpperCamelCase = model.generate(**__a )
UpperCamelCase = modules
def snake_case_ (self ) -> str:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
UpperCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
UpperCamelCase = model.generate(**__a )
# test with `flan-t5-small`
UpperCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map="auto" )
UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
UpperCamelCase = model.generate(**__a )
class _lowerCamelCase ( _lowercase ):
def snake_case_ (self ) -> Optional[Any]:
super().setUp()
# model_name
UpperCamelCase = "bigscience/bloom-560m"
UpperCamelCase = "t5-small"
# Different types of model
UpperCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
# Sequence classification model
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__a , device_map="auto" )
# CausalLM model
UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
# Seq2seq model
UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__a , device_map="auto" )
def snake_case_ (self ) -> Tuple:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ) -> Optional[int]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowerCamelCase ( _lowercase ):
def snake_case_ (self ) -> int:
super().setUp()
def snake_case_ (self ) -> Any:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
UpperCamelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowerCamelCase ( _lowercase ):
def snake_case_ (self ) -> List[Any]:
super().setUp()
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__a , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
UpperCamelCase = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
class _lowerCamelCase ( _lowercase ):
def snake_case_ (self ) -> Tuple:
UpperCamelCase = "facebook/opt-350m"
super().setUp()
def snake_case_ (self ) -> List[Any]:
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
UpperCamelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
UpperCamelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__a ) ):
UpperCamelCase = LoRALayer(module.q_proj , rank=16 )
UpperCamelCase = LoRALayer(module.k_proj , rank=16 )
UpperCamelCase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
UpperCamelCase = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
UpperCamelCase = model.forward(**__a )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__a , __a ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__a , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowerCamelCase ( _lowercase ):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
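
# Editor's sketch: the canonical 4-bit loading pattern the tests above
# exercise. Assumes a CUDA GPU plus `bitsandbytes`, and that AutoTokenizer is
# importable alongside the classes already used in this module; the checkpoint
# and prompt are illustrative only.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    demo_config = BitsAndBytesConfig(load_in_4bit=True)
    demo_model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-560m", quantization_config=demo_config, device_map="auto"
    )
    demo_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
    demo_inputs = demo_tokenizer("Hello my name is", return_tensors="pt").to(0)
    print(demo_tokenizer.decode(demo_model.generate(**demo_inputs, max_new_tokens=10)[0]))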
| 544 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
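
# Editor's note: with the `_LazyModule` hookup above, submodules are only
# imported when an attribute is first accessed, e.g. (assuming the public
# `google/fnet-base` checkpoint):
#     from transformers import FNetModel, FNetTokenizer
#     model = FNetModel.from_pretrained("google/fnet-base")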
| 104 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 424 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    '''A minimal one-step pipeline used to exercise custom pipeline loading.'''

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Draw a random sample matching the UNet's expected input shape.
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        # One denoising step, then collapse to a deterministic tensor of ones.
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
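
# Editor's sketch: driving the one-step pipeline above with small stand-in
# components. The class name was restored editorially; UNet2DModel,
# DDPMScheduler, and their kwargs are assumptions chosen only to satisfy the
# shapes the pipeline reads.
if __name__ == "__main__":
    from diffusers import DDPMScheduler, UNet2DModel

    demo_unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
    demo_pipe = CustomLocalPipeline(unet=demo_unet, scheduler=DDPMScheduler())
    print(demo_pipe().shape)  # torch.Size([1, 3, 8, 8])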
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
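
# Editor's sketch: a quick sanity check of the two classes above; the values
# printed are the library defaults, and running this needs only `transformers`.
if __name__ == "__main__":
    demo_config = YolosConfig()
    demo_onnx = YolosOnnxConfig(demo_config)
    print(demo_config.model_type, demo_onnx.default_onnx_opset, dict(demo_onnx.inputs))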
| 204 | 0 |
'''simple docstring'''
def is_palindrome(n):
    return str(n) == str(n)[::-1]


def sum_reverse(n):
    return int(n) + int(str(n)[::-1])


def solution(limit=10000):
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f'''{solution() = }''')
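
# Worked example (editor's addition): 196 is the classic Lychrel candidate;
# fifty reverse-and-add steps never reach a palindrome, so solution() counts it.
if __name__ == "__main__":
    candidate = 196
    for _ in range(50):
        candidate = sum_reverse(candidate)
    print(f"196 still non-palindromic after 50 steps: {not is_palindrome(candidate)}")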
| 143 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, 'r', encoding='utf-8') as f:
        content = f.read()

    lines = content.split('\n')
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r'^(\s*)\S', lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(' ' * indent + '('):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(' ' * indent + ')'):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, 'w', encoding='utf-8') as f:
            f.write('\n'.join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith('.py')]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            F'The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix'
            ' this.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
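
# Editor's sketch: pure, side-effect-free self-checks of the two regexes above.
assert _re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(") is not None
assert _re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0] == "albert"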
| 192 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"{key}: {value}")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
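
# Editor's note: a successful call returns the authenticated user's profile,
# roughly (fields abridged, token value hypothetical):
#     fetch_github_info("ghp_...")  # -> {"login": "...", "id": 123456, ...}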
| 696 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # Forward everything to the current processor while inside the
        # `as_target_processor` context manager, for backwards compatibility.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
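
# Editor's sketch of typical usage (checkpoint name assumed; not executed here
# to avoid a network download at import time):
#     import numpy as np
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text="a transcription", return_tensors="pt").input_ids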
| 696 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print('Making key files...')
    make_key_files('rsa', 1_024)
    print('Key files generation successful.')


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabinMiller.generate_large_prime(key_size)
    print('Generating prime q...')
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print('Generating e that is relatively prime to (p - 1) * (q - 1)...')
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print('Calculating d that is mod inverse of e...')
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', 'w') as out_file:
        out_file.write(f'{key_size},{public_key[0]},{public_key[1]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', 'w') as out_file:
        out_file.write(f'{key_size},{private_key[0]},{private_key[1]}')
if __name__ == "__main__":
main()
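
# Editor's sketch: the textbook (unpadded) RSA round trip the generated keys
# support. A tiny key size keeps the demo fast; that is insecure and for
# illustration only.
if __name__ == "__main__":
    demo_public, demo_private = generate_key(64)
    demo_n, demo_e = demo_public
    _, demo_d = demo_private
    ciphertext = pow(42, demo_e, demo_n)
    assert pow(ciphertext, demo_d, demo_n) == 42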
| 699 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "layer_norm" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(UpperCamelCase__ , UpperCamelCase__ )
else:
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = Attention(
query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
)
A = Attention(
query_dim=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , upcast_attention=UpperCamelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = FeedForward(UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn=UpperCamelCase__ , final_dropout=UpperCamelCase__ )
# let chunk size default to None
A = None
A = 0
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def UpperCamelCase ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(UpperCamelCase__ )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(UpperCamelCase__ , UpperCamelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase__ )
)
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(UpperCamelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = int(dim * mult )
A = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A = GELU(UpperCamelCase__ , UpperCamelCase__ )
if activation_fn == "gelu-approximate":
A = GELU(UpperCamelCase__ , UpperCamelCase__ , approximate='tanh' )
elif activation_fn == "geglu":
A = GEGLU(UpperCamelCase__ , UpperCamelCase__ )
elif activation_fn == "geglu-approximate":
A = ApproximateGELU(UpperCamelCase__ , UpperCamelCase__ )
A = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase__ )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase__ ) )
# project out
self.net.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase__ ) )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int ):
for module in self.net:
A = module(UpperCamelCase__ )
return hidden_states
class GELU(nn.Module):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str = "none" ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
A = approximate
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int ):
A = self.proj(UpperCamelCase__ )
A = self.gelu(UpperCamelCase__ )
return hidden_states
class GEGLU(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , dim_out * 2 )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Tuple ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def UpperCamelCase ( self : str , UpperCamelCase__ : str ):
A , A = self.proj(UpperCamelCase__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase__ )
class ApproximateGELU(nn.Module):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
A = self.proj(UpperCamelCase__ )
return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
super().__init__()
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , embedding_dim * 2 )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ ) ) )
A , A = torch.chunk(UpperCamelCase__ , 2 )
A = self.norm(UpperCamelCase__ ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
super().__init__()
A = CombinedTimestepLabelEmbeddings(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , 6 * embedding_dim , bias=UpperCamelCase__ )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ , eps=1e-6 )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=UpperCamelCase__ ) ) )
A , A , A , A , A , A = emb.chunk(6 , dim=1 )
A = self.norm(UpperCamelCase__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : float = 1e-5 ):
super().__init__()
A = num_groups
A = eps
if act_fn is None:
A = None
else:
A = get_activation(UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , out_dim * 2 )
def UpperCamelCase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str ):
if self.act:
A = self.act(UpperCamelCase__ )
A = self.linear(UpperCamelCase__ )
A = emb[:, :, None, None]
A , A = emb.chunk(2 , dim=1 )
A = F.group_norm(UpperCamelCase__ , self.num_groups , eps=self.eps )
A = x * (1 + scale) + shift
return x
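
# Editor's note: the method signatures above repeat a placeholder parameter
# name and will not execute as written; against the upstream diffusers
# equivalent of BasicTransformerBlock the usage is (shapes illustrative):
#     block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
#     out = block(torch.randn(2, 10, 64))  # -> shape (2, 10, 64)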
| 699 | 1 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
__magic_name__ = text_generator('''This is a test''' , do_sample=A )
self.assertEqual(
A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
__magic_name__ = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
__magic_name__ = text_generator('''This is a test''' , do_sample=A , num_return_sequences=2 , return_tensors=A )
self.assertEqual(
A , [
{'''generated_token_ids''': ANY(A )},
{'''generated_token_ids''': ANY(A )},
] , )
__magic_name__ = text_generator.model.config.eos_token_id
__magic_name__ = '''<pad>'''
__magic_name__ = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=A , num_return_sequences=2 , batch_size=2 , return_tensors=A , )
self.assertEqual(
A , [
[
{'''generated_token_ids''': ANY(A )},
{'''generated_token_ids''': ANY(A )},
],
[
{'''generated_token_ids''': ANY(A )},
{'''generated_token_ids''': ANY(A )},
],
] , )
@require_tf
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
__magic_name__ = text_generator('''This is a test''' , do_sample=A )
self.assertEqual(
A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
__magic_name__ = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=A )
self.assertEqual(
A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __A ( self , A , A , A ) -> Dict:
'''simple docstring'''
__magic_name__ = TextGenerationPipeline(model=A , tokenizer=A )
return text_generator, ["This is a test", "Another test"]
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = '''Hello I believe in'''
__magic_name__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
__magic_name__ = text_generator(A )
self.assertEqual(
A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
__magic_name__ = text_generator(A , stop_sequence=''' fe''' )
self.assertEqual(A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __A ( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = text_generator.model
__magic_name__ = text_generator.tokenizer
__magic_name__ = text_generator('''This is a test''' )
self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__magic_name__ = text_generator('''This is a test''' , return_full_text=A )
self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__magic_name__ = pipeline(task='''text-generation''' , model=A , tokenizer=A , return_full_text=A )
__magic_name__ = text_generator('''This is a test''' )
self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__magic_name__ = text_generator('''This is a test''' , return_full_text=A )
self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__magic_name__ = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=A )
self.assertEqual(
A , [
[{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
[{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__magic_name__ = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=A )
self.assertEqual(
A , [
[{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
[{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
] , )
with self.assertRaises(A ):
__magic_name__ = text_generator('''test''' , return_full_text=A , return_text=A )
with self.assertRaises(A ):
__magic_name__ = text_generator('''test''' , return_full_text=A , return_tensors=A )
with self.assertRaises(A ):
__magic_name__ = text_generator('''test''' , return_text=A , return_tensors=A )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__magic_name__ = text_generator('''''' )
self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__magic_name__ = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__magic_name__ = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 5_00 , max_new_tokens=20 )
__magic_name__ = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A ):
text_generator(
'''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __A ( self ) -> Any:
'''simple docstring'''
import torch
# Classic `model_kwargs`
__magic_name__ = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__magic_name__ = pipe('''This is a test''' )
self.assertEqual(
A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
__magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__magic_name__ = pipe('''This is a test''' )
self.assertEqual(
A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__magic_name__ = pipe('''This is a test''' )
self.assertEqual(
A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __A ( self ) -> Tuple:
'''simple docstring'''
import torch
__magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __A ( self ) -> Optional[int]:
'''simple docstring'''
import torch
__magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=A , top_p=0.5 )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = '''Hello world'''
__magic_name__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
__magic_name__ = logging.get_logger('''transformers.generation.tf_utils''' )
else:
__magic_name__ = logging.get_logger('''transformers.generation.utils''' )
        __magic_name__ = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A ) as cl:
__magic_name__ = text_generator(A , max_length=10 , max_new_tokens=1 )
self.assertIn(A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A ) as cl:
__magic_name__ = text_generator(A , max_new_tokens=1 )
self.assertNotIn(A , cl.out )
with CaptureLogger(A ) as cl:
__magic_name__ = text_generator(A , max_length=10 )
            self.assertNotIn(A , cl.out )
| 708 |
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def metric_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'from_type\' value: {from_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}'
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'to_type\' value: {to_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}'
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
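
# Worked examples (editor's addition):
#     metric_conversion(4, "meter", "kilometer")  # -> 0.004
#     metric_conversion(1, "kilometer", "meter")  # -> 1000.0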
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 678 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):

    model_input_names = ["pixel_values"]
def __init__( self : Tuple , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 2_55 , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : bool = True , **__lowercase : Optional[int] , ) -> None:
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE__ : Any =size if size is not None else {'''shortest_edge''': 2_24}
SCREAMING_SNAKE_CASE__ : str =get_size_dict(__lowercase , default_to_square=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
SCREAMING_SNAKE_CASE__ : List[Any] =get_size_dict(__lowercase , default_to_square=__lowercase , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =do_resize
SCREAMING_SNAKE_CASE__ : Any =size
SCREAMING_SNAKE_CASE__ : List[Any] =resample
SCREAMING_SNAKE_CASE__ : Optional[int] =do_center_crop
SCREAMING_SNAKE_CASE__ : Union[str, Any] =crop_size
SCREAMING_SNAKE_CASE__ : Dict =do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] =rescale_factor
SCREAMING_SNAKE_CASE__ : Dict =do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE__ : Tuple =image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE__ : str =do_convert_rgb
def __magic_name__ ( self : List[str] , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : int , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ : Any =get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
SCREAMING_SNAKE_CASE__ : List[Any] =get_resize_output_image_size(__lowercase , size=size['''shortest_edge'''] , default_to_square=__lowercase )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ : Any =get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__lowercase , size=(size['''height'''], size['''width''']) , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Optional[int] , __lowercase : np.ndarray , __lowercase : Union[int, float] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Tuple , ) -> List[Any]:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Tuple , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[int] , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def __magic_name__ ( self : Tuple , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : bool = None , __lowercase : float = None , __lowercase : bool = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : bool = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **__lowercase : int , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE__ : Dict =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Any =size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =get_size_dict(__lowercase , param_name='''size''' , default_to_square=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Tuple =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Tuple =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Any =get_size_dict(__lowercase , param_name='''crop_size''' , default_to_square=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Dict =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[Any] =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : List[str] =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : List[str] =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Dict =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE__ : Tuple =make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[convert_to_rgb(__lowercase ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Dict =[to_numpy_array(__lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[str] =[self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : str =[self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Any =[to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase )
| 296 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
| 296 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The nodes number should be same as the number of coins')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
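
# Worked example (editor's addition): a root holding 3 coins with two empty
# children needs exactly two moves, one coin pushed to each child.
if __name__ == "__main__":
    demo_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(demo_root) == 2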
| 708 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
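# Illustrative summary (added comment, not from the original test file):
# GradientAccumulator sums every gradient list passed to it and exposes the
# running totals via `.gradients` until `.reset()` is called, while `step`
# counts accumulation calls. The distributed test checks the same bookkeeping
# under tf.distribute.MirroredStrategy across two logical CPU devices.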
| 651 | 0 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 5 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 5 | 1 |
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
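# Worked examples (added for illustration):
#   multiplicative_persistence(217) == 2   # 2*1*7 = 14 -> 1*4 = 4
#   additive_persistence(199) == 3         # 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1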
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136 | 0 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
| 243 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 691 | 0 |
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
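# Worked examples (added for illustration):
#   is_palindrome(121)  -> True
#   is_palindrome(-121) -> False   # negatives are rejected outright
#   is_palindrome(10)   -> False   # 10 reversed is 1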
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 241 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 108 |
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
_UpperCamelCase = int(input("""Enter number: """).strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 341 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_generation(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 712 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
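# Added note: a hollow square lamina with outer width w and hole width h uses
# w**2 - h**2 tiles, so `count[t]` tallies how many distinct laminae consume
# exactly t tiles; the answer counts tile budgets achievable in 1..n_limit
# distinct ways (this matches Project Euler problem 174).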
if __name__ == "__main__":
print(F"""{solution() = }""")
| 262 | 0 |
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 685 |
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
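# Illustrative expected output (added): the driver prints all C(5, 3) = 10
# combinations, i.e. "10 20 30", "10 20 40", ..., "30 40 50", one per line.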
| 209 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 209 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 115 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
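    # Added note: each training sample feeds `look_back` consecutive scaled
    # values to the LSTM, which learns to emit the next `forward_days` values;
    # `division` splits the series so the test window still has `look_back`
    # points of history available before its first prediction.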
| 559 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler for score-based generative models."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, beta_min: float = 0.1, beta_max: float = 20, sampling_eps: float = 1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the continuous timesteps used for the diffusion chain (run before inference)."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Propagate the sample one step along the reverse-time VP SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score: rescale by the marginal standard deviation
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute the (negative) step size for reverse-time integration
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
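# A minimal usage sketch for the scheduler above (added for illustration, not
# part of the original file). `step_pred` takes one Euler-Maruyama step of the
# reverse-time VP SDE, dx = [-1/2 beta(t) x - beta(t) * score] dt + sqrt(beta(t)) dw.
# The constant score below is a placeholder; a real pipeline would call a
# trained score network such as a UNet.
scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(2, 3, 32, 32)  # start from pure Gaussian noise
for t in scheduler.timesteps:
    score = -sample  # placeholder for model(sample, t)
    sample, sample_mean = scheduler.step_pred(score, sample, t)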
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
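# The tests above pull fixtures such as `xz_file`, `text_file`,
# `tar_jsonl_path`, `zip_jsonl_path` and `data_dir_with_hidden_files` from the
# datasets test suite's conftest. A minimal sketch of two of them follows,
# assuming an lzma-compressed text file; the file contents here are
# illustrative, not the suite's real fixture data.
import lzma


@pytest.fixture
def text_file(tmp_path):
    path = tmp_path / "file.txt"
    path.write_text("foo\nbar\n")
    return path


@pytest.fixture
def xz_file(tmp_path, text_file):
    path = tmp_path / "file.txt.xz"
    with lzma.open(path, "wb") as f:
        f.write(text_file.read_text().encode("utf-8"))
    return path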
| 669 | 1 |