| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82-53.2k chars) | int64 (0-721) | string (91-41.9k chars) | int64 (0-699) | int64 (0-1) |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's law (V = I * R): exactly one of the three quantities must be
    passed as 0, and that missing quantity is computed from the other two.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
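# Added usage sketch (illustrative; not part of the original file): pass exactly
# one quantity as 0 and it is solved for via V = I * R.
if __name__ == "__main__":
    print(ohms_law(voltage=10, current=5, resistance=0))  # {'resistance': 2.0}
    print(ohms_law(voltage=0, current=2, resistance=4))   # {'voltage': 8.0}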
def solution(length: int = 50) -> int:
    """
    Count the ways a row of `length` units can be tiled with coloured tiles of
    length 2, 3 or 4 (one colour per arrangement, at least one coloured tile),
    in the style of Project Euler problem 116.
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the impedance relation Z**2 = R**2 + X**2: exactly one of the three
    arguments must be passed as 0 and is computed from the other two.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
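# Added usage sketch (illustrative): with R = 3 and X = 4 the impedance is 5.
if __name__ == "__main__":
    print(electrical_impedance(resistance=3, reactance=4, impedance=0))  # {'impedance': 5.0}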
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        # the long reference literal below is kept on a single line
        expected_encoding = \
snake_case_ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
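# Example invocation (illustrative; the paths below are placeholders):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#       --gpt2_config_file /tmp/gpt2/config.json \
#       --pytorch_dump_folder_path /tmp/gpt2-pytorch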
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """
    Segment tree supporting point updates and range queries with an arbitrary
    associative combine function (e.g. operator.add, max, min).
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Replace the element at index i with a new value."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Combine the values over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first traversal over all nodes."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
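# Added usage sketch (illustrative; not in the original file): any associative
# two-argument function works, e.g. a product tree.
if __name__ == "__main__":
    import operator

    product_tree = SegmentTree([2, 1, 5, 3, 4], operator.mul)
    print(product_tree.query_range(0, 4))  # 120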
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 | """simple docstring"""
from ..utils import DummyObject, requires_backends


# NOTE: the original class name was lost in this dump; the name below is inferred
# from the usual dummy-object layout for the ["torch", "scipy"] backends.
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
import os
from typing import Dict, List, Tuple, TypeVar, Union
_snake_case = TypeVar("T")
_snake_case = Union[List[T], Tuple[T, ...]]
_snake_case = Union[T, List[T], Dict[str, T]]
_snake_case = Union[str, bytes, os.PathLike]
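# Illustrative use of the aliases above (added; `load_paths` is a hypothetical function):
# def load_paths(paths: NestedDataStructureLike[PathLike]) -> None:
#     ...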
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`,
    using dynamic programming over all smaller values.
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
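# Added examples (illustrative): 25 = 5**2, 37 = 36 + 1, 21 = 16 + 4 + 1.
assert minimum_squares_to_represent_a_number(25) == 1
assert minimum_squares_to_represent_a_number(37) == 2
assert minimum_squares_to_represent_a_number(21) == 3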
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" CamemBERT tokenizer backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" PEGASUS tokenizer backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # entries 2 - 104 are only used for pretraining
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq: List) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
__magic_name__ : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase : Any = """megatron-bert"""
def __init__( self , _A=2_9_0_5_6 , _A=1_0_2_4 , _A=2_4 , _A=1_6 , _A=4_0_9_6 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=2 , _A=0.02 , _A=1e-1_2 , _A=0 , _A="absolute" , _A=True , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=_A , **_A )
UpperCamelCase : int = vocab_size
UpperCamelCase : int = hidden_size
UpperCamelCase : Tuple = num_hidden_layers
UpperCamelCase : Tuple = num_attention_heads
UpperCamelCase : Any = hidden_act
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : Union[str, Any] = hidden_dropout_prob
UpperCamelCase : List[Any] = attention_probs_dropout_prob
UpperCamelCase : Union[str, Any] = max_position_embeddings
UpperCamelCase : int = type_vocab_size
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Union[str, Any] = layer_norm_eps
UpperCamelCase : Optional[int] = position_embedding_type
UpperCamelCase : int = use_cache
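# Added usage sketch (illustrative; not part of the original module):
# from transformers import MegatronBertConfig, MegatronBertModel
# config = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
# model = MegatronBertModel(config)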
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]
if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."],
        )

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #          ^ unk: 2 + 1 = 3          unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of the input list, using bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
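# Doctest-style sanity checks (illustrative):
#     >>> iter_merge_sort([9, 2, 5, 3])
#     [2, 3, 5, 9]
#     >>> iter_merge_sort([1])
#     [1]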
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted)) | 16 |
""" ViT model configuration"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """Configuration class for the Vision Transformer (ViT) model."""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
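# Minimal usage sketch (illustrative):
#   config = ViTConfig()   # ViT-base defaults: 12 layers, hidden size 768
#   config.patch_size      # -> 16, so a 224x224 image yields 14 * 14 = 196 patches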
| 697 | 0 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
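# Note on the key layout written below: the public key is (key_size, e_1, e_2, p) with
# e_2 = (e_1 ** d)^(-1) mod p, and the private key is (key_size, d). Recovering d from
# the public values amounts to a discrete-logarithm problem, which is assumed hard.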
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 708 |
"""simple docstring"""
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
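# Worked micro-example of the same scan: for n = "12345" with a window of 2, the
# reduce multiplies each pair (1*2, 2*3, 3*4, 4*5) and max(...) returns 20.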
if __name__ == "__main__":
print(f'{solution() = }')
| 406 | 0 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by sampling uniform points in the square [-1, 1]^2 and counting
    the fraction that lands inside the unit circle."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of function_to_integrate over
    [min_value, max_value]: average the function at uniform samples, then scale
    by the interval length."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    """Check the estimator against the exact area under y=x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int):
    """Estimate pi as the area under the quarter circle sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
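# Illustrative calls (larger iteration counts tighten the estimates):
#   pi_estimator(100_000)                                  # prints a value near 3.14
#   area_under_curve_estimator(100_000, lambda x: x * x)   # approximately 1/3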
if __name__ == "__main__":
import doctest
doctest.testmod()
| 499 |
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption under every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
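# Example: decrypt("OVVWSH") prints all 26 candidate shifts; the "Key #7" line reads
# "HOOPLA", since shifting each ciphertext letter back by 7 recovers the plaintext.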
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 499 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    r"""Constructs a MobileViT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
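    # Illustrative usage (output shape assumes the default 256x256 center crop):
    #   processor = MobileViTImageProcessor()
    #   batch = processor(images=pil_image, return_tensors="pt")
    #   batch["pixel_values"].shape  # -> torch.Size([1, 3, 256, 256])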
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation | 719 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
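# Illustrative call (requires the checkpoint above to be downloadable):
#   classifier = TextClassificationTool()
#   classifier("This new movie is awesome", ["positive", "negative"])  # -> "positive"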
| 322 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    # Note: the original target key strings were lost in this dump; they are reconstructed
    # here from the surrounding structure and may differ in minor details.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
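# The .npy files store a sampled clip as a stacked array of frames; list() converts it
# into the per-frame list that XCLIPProcessor expects for its `videos` argument.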
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original X-CLIP weights into the transformers design."""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
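    # Expected softmax probabilities over the three text prompts for each supported
    # checkpoint; used below to verify the converted weights.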
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 322 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class MraConfig(PretrainedConfig):
    """Configuration class for the MRA model."""

    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks | 322 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
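# Example invocation (paths are illustrative):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /tmp/gpt2-pytorch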
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 710 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample, as a module."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with a convolutional projection."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group; input shape is [batch, channels, height, width]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # subtracting the input makes the average pooling act as zero-centered token mixing
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """One PoolFormer block: pooling-based token mixing followed by a channel MLP."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) | 263 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # Copy the ClassLabel from the given features into a fresh task template
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
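# Illustrative use with a `datasets` Features object:
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)  # copies the ClassLabel into the label schema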
| 412 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create and fit the estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile) | 412 | 1 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    # replace the last `occurrence` occurrences of `old` in `s`
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA codebook, so skip them
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict( state_dict : dict ):
    '''simple docstring'''
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.' , f'{group_key}.group.' )
        if "res_path" in key:
            key = key.replace("""res_path.""" , """res_path.path.""" )
        if key.endswith(""".w""" ):
            key = rreplace(key , """.w""" , """.weight""" , 1 )
        if key.endswith(""".b""" ):
            key = rreplace(key , """.b""" , """.bias""" , 1 )
        upgrade[key] = value.float()
    return upgrade
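# Illustration (editor's note): a DALL-E key such as "group_1.res_path.0.w" is
# rewritten step by step to the FLAVA name "group_1.group.res_path.path.0.weight".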
@torch.no_grad()
def convert_dalle_checkpoint( checkpoint_path : str , pytorch_dump_folder_path : str , config_path : str=None , save_checkpoint : bool=True ):
    '''simple docstring'''
    from dall_e import Encoder
    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )
    if isinstance(ckpt , Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 714 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
extra_arch = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = ' Hello world! cécé herlolip'
mnli_rename_keys = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def remove_ignore_keys_( state_dict : dict ):
    '''simple docstring'''
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """_float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct : dict , old : str , new : str ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def load_xsum_checkpoint( checkpoint_path : str ):
    '''simple docstring'''
    sd = torch.load(checkpoint_path , map_location="""cpu""" )
    hub_interface = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
    hub_interface.model.load_state_dict(sd["""model"""] )
    return hub_interface
def make_linear_from_emb( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
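# Illustration (editor's note): this ties the LM head to the shared embedding
# matrix; e.g. a hypothetical (50265, 1024) embedding yields a bias-free layer
# whose weight is that same matrix, mapping 1024-dim states to 50265 logits.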
@torch.no_grad()
def convert_bart_checkpoint( checkpoint_path : str , pytorch_dump_folder_path : str , hf_checkpoint_name : str=None ):
    '''simple docstring'''
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load("""pytorch/fairseq""" , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(""".""" , """-""" )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors="""pt""" ).unsqueeze(0 )
    if not torch.eq(tokens , tokensa ).all():
        raise ValueError(
            f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["""model.shared.weight"""] = state_dict["""model.decoder.embed_tokens.weight"""]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict("""mnli""" , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , """lm_head""" ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 80 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( ciphertext : list[int] , key : tuple[int, ...] ) -> str | None:
    """simple docstring"""
    decoded : str = ""
    keychar : int
    cipherchar : int
    decodedchar : int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
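# Illustration (editor's note): XOR is self-inverse, so applying the key twice
# restores the plaintext, e.g. ord("h") ^ ord("a") == 9 and 9 ^ ord("a") == ord("h").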
def filter_valid_chars( ciphertext : list[int] ) -> list[str]:
    """simple docstring"""
    possibles : list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles : list[str] , common_word : str ) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution( filename : str = "p059_cipher.txt" ) -> int:
    """simple docstring"""
    ciphertext : list[int]
    possibles : list[str]
    common_word : str
    decoded_text : str
    data : str = Path(__file__ ).parent.joinpath(filename ).read_text(encoding='''utf-8''' )
    ciphertext = [int(number ) for number in data.strip().split(''',''' )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 94 |
"""simple docstring"""
from __future__ import annotations
def A ( matrix ):
    '''simple docstring'''
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
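# Illustration (editor's note): for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the function
# returns 7, following the right/down path 1 -> 3 -> 1 -> 1 -> 1.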
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
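# Editor's note: `_LazyModule` defers the heavy torch/vision imports until an
# attribute is first accessed, e.g. `from transformers.models.bridgetower import
# BridgeTowerConfig` only then imports the underlying configuration module.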
| 364 |
import math
def check_partition_perfect( positive_integer : int ):
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
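# Illustration (editor's note): for positive_integer = 2 we get
# sqrt(4 * 2 + 1) / 2 + 1 / 2 == 2.0 and log2(2.0) == 1.0, which is integral,
# so the check returns True; for 3 the exponent is non-integral and it returns False.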
def solution( max_proportion : float = 1 / 1_2345 ):
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(integer )
        integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 364 | 1 |
"""simple docstring"""
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 374 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log( folder_path: str ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path ,'git_log.json' ) ,'w' ) as f:
        json.dump(repo_infos ,f ,indent=4 )
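# Illustration (editor's note): the dumped git_log.json looks roughly like
#   {"repo_id": "<git.Repo \"/path/to/repo/.git\">", "repo_sha": "4f2a...", "repo_branch": "main"}
# (all values here are hypothetical).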
def init_gpu_params( params ):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs' )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'] )
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'] )
        params.global_rank = int(os.environ['RANK'] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'] )
        assert params.node_id == int(os.environ['NODE_RANK'] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = F'--- Global rank: {params.global_rank} - '
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
    logger.info(PREFIX + 'Node ID        : %i' % params.node_id )
    logger.info(PREFIX + 'Local rank     : %i' % params.local_rank )
    logger.info(PREFIX + 'World size     : %i' % params.world_size )
    logger.info(PREFIX + 'GPUs per node  : %i' % params.n_gpu_per_node )
    logger.info(PREFIX + 'Master         : %s' % str(params.is_master ) )
    logger.info(PREFIX + 'Multi-node     : %s' % str(params.multi_node ) )
    logger.info(PREFIX + 'Multi-GPU      : %s' % str(params.multi_gpu ) )
    logger.info(PREFIX + 'Hostname       : %s' % socket.gethostname() )
    # set GPU device
    torch.cuda.set_device(params.local_rank )
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed' )
        torch.distributed.init_process_group(
            init_method='env://' ,backend='nccl' ,)
def set_seed( args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 716 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    },
    '''tokenizer_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
SPIECE_UNDERLINE = '''▁'''
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
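    # Illustration (editor's note): XLNet appends the classifier token at the end,
    # so for a pair (A, B) the segment ids are
    #   [0] * (len(A) + 1) + [1] * (len(B) + 1) + [2]
    # with the trailing 2 marking the <cls> token.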
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 154 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label( fname ):
    stem = fname.split(os.path.sep )[-1]
    return re.search(R'^(.*)_\d+\.jpg$' , stem ).groups()[0]
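# Illustration (editor's note): a file name such as "great_pyrenees_173.jpg"
# yields the label "great_pyrenees".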
class PetsDataset( Dataset ):
    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        """simple docstring"""
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__( self ):
        """simple docstring"""
        return len(self.file_names )
    def __getitem__( self , idx ):
        """simple docstring"""
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert('RGB' )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function( config , args ):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    image_size = config['image_size']
    if not isinstance(image_size , (list, tuple) ):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps , 'isdigit' ):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps )
        else:
            raise ValueError(
                F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split('.' )[0]
        accelerator.init_trackers(run , config )
# Grab all the image filenames
    file_names = [os.path.join(args.data_dir , fname ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
    # Build the label correspondences
    all_labels = [extract_label(fname ) for fname in file_names]
    id_to_label = list(set(all_labels ) )
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label )}
    # Set the seed before splitting the data.
    np.random.seed(seed )
    torch.manual_seed(seed )
    torch.cuda.manual_seed_all(seed )
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names ) )
    cut = int(0.8 * len(file_names ) )
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size , scale=(0.5, 1.0) ), ToTensor()] )
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split] , image_transform=train_tfm , label_to_id=label_to_id )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size ), ToTensor()] )
    eval_dataset = PetsDataset([file_names[i] for i in eval_split] , image_transform=eval_tfm , label_to_id=label_to_id )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    eval_dataloader = DataLoader(eval_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model('resnet50d' , pretrained=True , num_classes=len(label_to_id ) )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
    std = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer , max_lr=lr , epochs=num_epochs , steps_per_epoch=len(train_dataloader ) )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
            accelerator.load_state(args.resume_from_checkpoint )
            path = os.path.basename(args.resume_from_checkpoint )
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
            dirs.sort(key=os.path.getctime )
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path )[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace('epoch_' , '' ) ) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace('step_' , '' ) )
            starting_epoch = resume_step // len(train_dataloader )
            resume_step -= starting_epoch * len(train_dataloader )
# Now we train the model
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader , resume_step )
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            outputs = model(inputs )
            loss = torch.nn.functional.cross_entropy(outputs , batch['label'] )
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss )
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps , int ):
                output_dir = F'''step_{overall_step}'''
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir , output_dir )
                        accelerator.save_state(output_dir )
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            with torch.no_grad():
                outputs = model(inputs )
            predictions = outputs.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch['label']) )
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
        if args.with_tracking:
            accelerator.log(
                {
                    'accuracy': 100 * eval_metric,
                    'train_loss': total_loss.item() / len(train_dataloader ),
                    'epoch': epoch,
                } , step=overall_step , )
        if checkpointing_steps == "epoch":
            output_dir = F'''epoch_{epoch}'''
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir , output_dir )
                accelerator.save_state(output_dir )
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=lowerCamelCase_ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=lowerCamelCase_ , default=lowerCamelCase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=lowerCamelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=lowerCamelCase_ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config , args )
if __name__ == "__main__":
main()
| 89 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    dataset_config_name: Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
    max_seq_length: int = field(
        default=10_24, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={"""help""": """A csv or a json file containing the training data."""} )
    validation_file: Optional[str] = field(
        default=None, metadata={"""help""": """A csv or a json file containing the validation data."""} )
    test_file: Optional[str] = field(default=None, metadata={"""help""": """A csv or a json file containing the test data."""} )
    def __post_init__(self):
        """simple docstring"""
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
        else:
            train_extension = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
    model_revision: str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
    use_auth_token: bool = field(
        default=False, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        }, )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.' )[-1]
                test_extension = data_args.test_file.split('.' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
        for key in data_files.keys():
            logger.info(F'''load a local file for {key}: {data_files[key]}''' )
        if data_args.train_file.endswith('.csv' ):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('csv' , data_files=data_files , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('json' , data_files=data_files , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
            F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            _table_content = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result['label'] = examples['label']
        return result
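    # Illustration (editor's note): a raw table string such as
    #   "city#population\nparis#2m\nberlin#3.7m"
    # becomes a two-column DataFrame with header ["city", "population"], which
    # TapexTokenizer then linearizes together with the statement.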
    with training_args.main_process_first(desc='dataset map pre-processing' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset' )
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label' )
        predictions = trainer.predict(predict_dataset , metric_key_prefix='predict' ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
                logger.info('***** Predict Results *****' )
                writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'''{index}\t{item}\n''' )
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 89 | 1 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
| 719 |
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__( self , graph : dict[str, list[str]] , source_vertex : str ):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search( self ):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self , target_vertex : str ):
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F'->{target_vertex}'
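    # Illustration (editor's note): with source "G" the parent chain for "F" is
    # F <- C <- G, so shortest_path("F") returns "G->C->F".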
if __name__ == "__main__":
    g = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 189 | 0 |
import requests
from bs4 import BeautifulSoup
def get_citation( base_url : str , params : dict ) -> str:
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 3_0,
"""pages""": """3979-3990""",
"""year""": 2_0_1_8,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 33 |
"""simple docstring"""
def stooge_sort( arr ):
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge( arr , i , h ):
    if i >= h:
        return
    # If first element is larger than the last then swap them
    if arr[i] > arr[h]:
        arr[i] , arr[h] = arr[h] , arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3 )
        # Recursively sort first 2/3 elements
        stooge(arr , i , (h - t) )
        # Recursively sort last 2/3 elements
        stooge(arr , i + t , (h) )
        # Recursively sort first 2/3 elements
        stooge(arr , i , (h - t) )
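# Illustration (editor's note): each call sorts the first 2/3, the last 2/3 and
# the first 2/3 again, so e.g. stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].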
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 425 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
'''simple docstring'''
    def __init__( self , parent , vocab_size=99 , batch_size=13 , d_model=16 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=32 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=30 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = scope
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
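    # How the check above validates the KV cache: the model is run once on the full
    # `next_input_ids` and once incrementally (only the new token plus
    # `past_key_values`), and a randomly chosen slice of the hidden state for the
    # new position must match between the two paths.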
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 607 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
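    # Design note: the serving signature above pins the sequence length to
    # `input_length` while leaving the batch dimension as None, so the
    # XLA-compiled (`jit_compile=True`) function is traced once for that shape and
    # can still serve any batch size. The next test inverts this: fixed batch
    # size, dynamic sequence length.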
@slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose call() accepts an extra "foo" argument
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 607 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
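# split_dataset_by_node hands out the full_size examples as evenly as it can:
# every rank gets full_size // world_size of them, and the first
# full_size % world_size ranks get one extra -- which is exactly what the
# expected_local_size arithmetic above computes.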
if __name__ == "__main__":
    main()
| 458 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}", )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                projection = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}" )
                self.out_projs.append(projection)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
@staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b
@staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
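    # _gather_logprob builds index pairs [[0, target_0], [1, target_1], ...] so
    # that tf.gather_nd picks, for every row, the log-probability assigned to that
    # row's target token.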
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.)
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
| 458 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
_SCREAMING_SNAKE_CASE = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_SCREAMING_SNAKE_CASE = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
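# The GLIDER pattern drifts one cell diagonally every four generations, so the
# resulting GIF shows it crossing the 8x8 board; swapping in BLINKER (defined
# above) instead produces the classic period-2 oscillator.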
| 706 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning, )
        return self.image_processor
| 517 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('cpu')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
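# Example invocation (the script filename is illustrative):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth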
| 620 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends with 1
CHAINS[57] = False  # the chain starting at 58 ends with 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
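# Worked examples of the chains being classified (from the problem statement):
#   44 -> 32 -> 13 -> 10 -> 1 -> 1                             (True: ends at 1)
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89   (False: ends at 89)
# solution() counts how many starting numbers below ten million fall into the
# second kind of chain.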
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 515 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a__ = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 709 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
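# Note: generate_dummy_inputs deliberately builds past_key_values with a length
# (seqlen + 2) different from the input length -- per the inline comment above,
# using distinct lengths keeps the exported ONNX graph from treating the two
# sequence axes as one and the same dimension.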
| 578 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`.")

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision, )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
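# Illustrative example: a resolved file such as
#   ~/.cache/huggingface/hub/models--user--repo/snapshots/0123abcd.../model.bin
# yields "0123abcd...", which is returned only if it matches REGEX_COMMIT_HASH.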
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.")
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
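# Example: _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin"; with variant=None the name is passed
# through unchanged.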
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.")
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`.")
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.")
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.")
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}")
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.")
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}")
| 86 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
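# Worked examples from the problem statement: solution(2) returns 14, the first
# of two consecutive integers with two distinct prime factors each
# (14 = 2 x 7, 15 = 3 x 5), and solution(3) returns 644
# (644 = 2^2 x 7 x 23, 645 = 3 x 5 x 43, 646 = 2 x 17 x 19).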
if __name__ == "__main__":
print(solution())
| 663 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                'The config can simply be saved and uploaded again to be fixed.')
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
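# Minimal export sketch (assumed usage; the checkpoint name and output path are
# illustrative, not part of this module):
#
#   from pathlib import Path
#   from transformers import AutoConfig, AutoModel, AutoTokenizer
#   from transformers.onnx import export
#
#   model_name = "facebook/bart-base"
#   onnx_config = BartOnnxConfig(AutoConfig.from_pretrained(model_name), task="default")
#   export(
#       AutoTokenizer.from_pretrained(model_name),
#       AutoModel.from_pretrained(model_name),
#       onnx_config,
#       onnx_config.default_onnx_opset,
#       Path("bart.onnx"),
#   )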
| 648 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in-place to either the n-th term or the smallest
    term for which c > 10^k when the terms are written as a(i) = b * 10^k + c.
    Differences between terms ("jumps") are cached so long stretches of the
    sequence can be skipped. Returns (difference between ending and starting
    term, number of terms calculated).
    """
    # ds_b -> digitsum(b); c -> the low-order part of a(i)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            jumps = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Same as next_term(a_i, k, i, n) but computes terms without memoizing results."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds addend to the digit array `digits`, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence a(1) = 1,
    a(i+1) = a(i) + digitsum(a(i))  (Project Euler 551).
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'{solution() = }')
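# Quick check against small hand-computed terms: the sequence runs
# 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ... so solution(10) == 62.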
| 648 | 1 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
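# Illustrative invocation (script filename, argument names, and repo names are
# assumptions based on InitializationArguments, not confirmed by this snippet):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model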
| 103 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta start, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta start, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3 | 46 | 0 |
def sylvester(number: int) -> int:
    """Calculate the number at position `number` in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
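# The sequence begins 2, 3, 7, 43, 1807, ...: each term equals the product of all
# previous terms plus one, equivalently a(n) = a(n-1)**2 - a(n-1) + 1 as computed above.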
| 715 | from __future__ import annotations
from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Treats the curve as a collection of linear segments and sums the areas of the trapeziums they form."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
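    # As `steps` grows, the printed values approach the exact unsigned area
    # of |x^3 + x^2| over [-5, 5], which is 3752/12 ≈ 312.67: the abs() above
    # makes segments below the x axis count positively.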
| 83 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
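# Stubs like this keep the top-level import surface intact when torch/scipy are
# absent: importing the class still works, while any instantiation or classmethod
# call raises a helpful ImportError via requires_backends instead of failing at
# import time.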
| 231 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
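# Example invocation (script filename and output path are illustrative):
#   python convert_swin_timm_to_pytorch.py --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224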
| 231 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width for images fed to BridgeTowerImageProcessor,
        assuming do_resize is True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        ) | 512 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)

        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        # note: the slice below intentionally mirrors the upstream test, which reads
        # the "depth" slice out of the rgb array
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3 | 512 | 1 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # greedily match the longest vocabulary entry starting at `start`
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation followed by wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and special boundary ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
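# Minimal usage sketch (checkpoint name from the map above; the sample sentence
# is illustrative):
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   ids = tokenizer.encode("今天天气真好!")  # jieba-segmented, then wordpiece
#   text = tokenizer.decode(ids)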
| 76 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader ( AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self , path_or_paths: NestedDataStructureLike[PathLike] , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def read( self ):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
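A brief usage sketch of the reader; the file path is a placeholder.
# Load a line-per-example text file as a map-style Dataset (hypothetical path).
reader = TextDatasetReader("path/to/corpus.txt", split="train")
dataset = reader.read()
print(dataset[0])  # e.g. {'text': '<first line of the file>'}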
| 639 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def read_txt_into_dict( filename ):
    '''simple docstring'''
    result = {}
    with open(filename , '''r''' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
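A small standalone illustration of the id-to-label mapping this helper builds; the label names are invented.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("down\ngo\nleft\n")  # one class name per line
print(read_txt_into_dict(tmp.name))  # -> {0: 'down', 1: 'go', 2: 'left'}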
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            weight_type = '''param'''
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('''.''' ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('''.''' ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def rename_dict( key , value , full_name , weight_type , hf_dict ):
    '''simple docstring'''
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            weight_type = '''param'''
    if weight_type is not None and weight_type != "param":
        full_key = '''.'''.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = '''.'''.join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if '''lm_head''' in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def load_wavaveca_layer( name , value , hf_model=None , hf_dict=None ):
    '''simple docstring'''
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split('''.''' )[-2]
                mapped_key = mapped_key.replace('''*''' , layer_index )
            if "weight_g" in name:
                weight_type = '''weight_g'''
            elif "weight_v" in name:
                weight_type = '''weight_v'''
            elif "bias" in name:
                weight_type = '''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = '''weight'''
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    '''simple docstring'''
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 0
            vocab_dict['''<s>'''] = 1
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
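A hypothetical invocation of the conversion, assuming the script is saved as convert_wav2vec2.py; every path below is a placeholder.
# Example (run from a shell):
#   python convert_wav2vec2.py \
#       --checkpoint_path /path/to/wav2vec_small_960h.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h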
| 700 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')
prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 401 | 0 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory( args ):
    """simple docstring"""
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand (BaseDatasetsCLICommand ):
    """simple docstring"""
    @staticmethod
    def register_subcommand ( parser : ArgumentParser ):
        train_parser = parser.add_parser(
            '''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
        train_parser.add_argument(
            '''--tfds_path''' ,type=str ,required=True ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
        train_parser.add_argument(
            '''--datasets_directory''' ,type=str ,required=True ,help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self ,tfds_path : str ,datasets_directory : str ,*args ):
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run ( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(F"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F"Looking at file {f_name}" )
            input_file = os.path.join(abs_tfds_path ,f_name )
            output_file = os.path.join(abs_datasets_path ,f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file ,encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' ,'''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line ,TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern ,replacement ,out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    out_line = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F"Error converting {out_line.strip()}" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('''.py''' ,'''''' )
                output_dir = os.path.join(abs_datasets_path ,dir_name )
                output_file = os.path.join(output_dir ,f_name )
                os.makedirs(output_dir ,exist_ok=True )
                self._logger.info(F"Adding directory {output_dir}" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file ,'''w''' ,encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
            self._logger.info(F"Converted in {output_file}" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
                self._logger.info(F"Moving {dest_folder} to {utils_file}" )
                shutil.copy(utils_file ,dest_folder )
            except KeyError:
                self._logger.error(F"Cannot find destination folder for {utils_file}. Please copy manually." )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 41 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
    """simple docstring"""
    def __init__( self ,parent ,batch_size=1_3 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=9_9 ,hidden_size=3_2 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=3_7 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=1_2_8 ,max_relative_position=3_2 ,type_vocab_size=1_6 ,type_sequence_label_size=2 ,initializer_range=0.0_2 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config ( self ):
        return NezhaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def prepare_config_and_inputs_for_decoder ( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids )
        result = model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def create_and_check_model_as_decoder ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,)
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,encoder_hidden_states=encoder_hidden_states ,)
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_next_sequence_prediction ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
    def create_and_check_for_pretraining ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels ,next_sentence_label=sequence_labels ,)
        self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
    def create_and_check_for_question_answering ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,start_positions=sequence_labels ,end_positions=sequence_labels ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice ( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids ,attention_mask=multiple_choice_input_mask ,token_type_ids=multiple_choice_token_type_ids ,labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest (ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class ( self ,inputs_dict ,model_class ,return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
    def setUp ( self ):
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=NezhaConfig ,hidden_size=3_7 )
    def test_config ( self ):
        self.config_tester.run_common_tests()
    def test_model ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask ( self ):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,)
    def test_for_masked_lm ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_question_answering ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self ):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict ,model_class )
            traced_model = torch.jit.trace(
                model ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model ,os.path.join(tmp ,'''bert.pt''' ) )
                loaded = torch.jit.load(os.path.join(tmp ,'''bert.pt''' ) ,map_location=torch_device )
                loaded(inputs_dict['''input_ids'''].to(torch_device ) ,inputs_dict['''attention_mask'''].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest (unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_nezha_model ( self ):
        model = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 7_6_8) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1e-4 ) )
@slow
    def test_inference_nezha_masked_lm ( self ):
        model = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 2_1_1_2_8) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1e-4 ) )
| 41 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests ( TestCasePlus ):
    '''simple docstring'''
    def _create_dummy_data ( self , data_dir ):
        os.makedirs(data_dir , exist_ok=True )
        contents = {"""source""": """What is love ?""", """target""": """life"""}
        n_lines = {"""train""": 12, """val""": 2, """test""": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = """\n""".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f'''{split}.{field}''' ) , """w""" ) as f:
                    f.write(content )
    def _run_finetune ( self , gpus , distributed_retriever = "pytorch" ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , """output""" )
        data_dir = os.path.join(tmp_dir , """data""" )
        self._create_dummy_data(data_dir=data_dir )
        testargs = f'''
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            '''.split()
        if gpus > 0:
            testargs.append(f'''--gpus={gpus}''' )
            if is_apex_available():
                testargs.append("""--fp16""" )
        else:
            testargs.append("""--gpus=0""" )
            testargs.append("""--distributed_backend=ddp_cpu""" )
            testargs.append("""--num_processes=2""" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , """metrics.json""" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
    @require_torch_gpu
    def test_finetune_gpu ( self ):
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
    @require_torch_multi_gpu
    def test_finetune_multigpu ( self ):
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval ( self ):
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval ( self ):
        result = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 |
def solution ( length = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
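A hedged usage sketch of the re-exported pipeline; the checkpoint id and generation mode below are assumptions, not taken from this module.
import torch
from diffusers import UniDiffuserPipeline

# Hypothetical checkpoint id with the expected UniDiffuser layout.
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16).to("cuda")
sample = pipe(prompt="an astronaut riding a horse")  # text-to-image mode
sample.images[0].save("unidiffuser-sample.png")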
| 99 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
    '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
    '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file ( fname , version , pattern ):
    with open(fname , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""" , version )
    code = re_pattern.sub(replace , code )
    with open(fname , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.write(code )
def update_version_in_examples ( version ):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""" )
        if "legacy" in directories:
            directories.remove("""legacy""" )
        for fname in fnames:
            if fname.endswith(""".py""" ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern="""examples""" )
def global_version_update ( version , patch=False ):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list ():
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("""1.""" ):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
        index += 1
    with open(README_FILE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
def get_version ():
    with open(REPLACE_FILES["""init"""] , """r""" ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work ( patch=False ):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version , patch=patch )
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
        clean_main_ref_in_model_list()
def post_release_work ():
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''' )
    if len(version ) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version )
    print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
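A self-contained demonstration of how the `init` pattern rewrites a version string, without touching any repository file:
re_pattern, replace = REPLACE_PATTERNS["init"]
sample = '__version__ = "4.30.0.dev0"'
print(re_pattern.sub(replace.replace("VERSION", "4.30.0"), sample))
# -> __version__ = "4.30.0"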
| 392 | 0 |
'''simple docstring'''
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool ( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
    def encode (self , text , src_lang , tgt_lang ):
        """simple docstring"""
        if src_lang not in self.lang_to_code:
            raise ValueError(f'''{src_lang} is not a supported language.''' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='''pt''' , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward (self , inputs ):
        """simple docstring"""
        return self.model.generate(**inputs )
    def decode (self , outputs ):
        """simple docstring"""
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
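A hedged usage sketch; `PipelineTool.__call__`, provided by the base class, chains `encode`, `forward` and `decode`:
# First use downloads the default NLLB checkpoint declared above.
translator = TranslationTool()
print(translator("How are you?", src_lang="English", tgt_lang="French"))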
| 709 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp (pattern , text ):
    """simple docstring"""
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
def test_rabin_karp ():
    """simple docstring"""
    # Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern , text )
    pattern = '''Lue'''
    assert not rabin_karp(pattern , text )
    print('''Success.''')
if __name__ == "__main__":
test_rabin_karp()
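Two extra standalone checks of the matcher beyond the bundled test suite:
assert rabin_karp("abc", "xxabcxx")      # pattern occurs in the text
assert not rabin_karp("abd", "xxabcxx")  # same-length pattern that never occurs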
| 352 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main ( args ):
    """simple docstring"""
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('''/''' )
    target_model_path = args.target_model_path
    print(f'Load fine-pruned model from {model_name_or_path}' )
    model = torch.load(os.path.join(model_name_or_path , '''pytorch_model.bin''' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                l , r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            else:
                raise ValueError('''Unknown pruning method''' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f'bertarized_{os.path.basename(model_name_or_path )}' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f'\nCreated folder {target_model_path}' )
    torch.save(pruned_model , os.path.join(target_model_path , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
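
# Illustrative stand-in for what MagnitudeBinarizer computes above (assumed
# from the CLI help, where the threshold is the fraction of weights kept —
# this helper is mine, not the emmental implementation): keep the largest
# weights by absolute value and zero out the rest.
def magnitude_mask_sketch(tensor, keep_fraction):
    k = max(1, int(keep_fraction * tensor.numel()))
    cutoff = tensor.abs().flatten().topk(k).values.min()
    return (tensor.abs() >= cutoff).to(tensor.dtype)


# e.g. keep 50% of the weights:
#   w = torch.randn(4, 4)
#   pruned = w * magnitude_mask_sketch(w, 0.5)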
| 43 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between any two of the energy units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
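
# Quick usage check — the values follow directly from the conversion table above:
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
assert energy_conversion("joule", "kilojoule", 1_000) == 1.0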
| 620 | 0 |
"""simple docstring"""
from itertools import permutations
def lowercase (_lowerCAmelCase ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__lowerCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(_lowerCAmelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase (_lowerCAmelCase = 10 ):
return sum(
int("""""".join(map(_lowerCAmelCase , _lowerCAmelCase ) ) )
for num in permutations(range(_lowerCAmelCase ) )
if is_substring_divisible(_lowerCAmelCase ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 710 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float16, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
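
# Illustrative arithmetic behind the "strength" input used in the fast tests
# above (assumed from the usual img2img timestep scheduling, not quoted from
# this file): strength scales how many of the requested denoising steps run.
num_inference_steps, strength = 2, 0.75
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
# with 2 steps and strength 0.75, a single step is run from the noised init image
assert (init_timestep, t_start) == (1, 1)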
| 573 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
UpperCamelCase = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
UpperCamelCase = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
UpperCamelCase = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
'''simple docstring'''
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(self, predictions, references, char_order=CHRF.CHAR_ORDER, word_order=CHRF.WORD_ORDER, beta=CHRF.BETA, lowercase=False, whitespace=False, eps_smoothing=False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 520 |
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
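
# Hedged usage sketch of the round trip these tests exercise — Dataset.to_sql /
# Dataset.from_sql are the public counterparts of the reader/writer classes
# (kept as comments so the test module stays side-effect free):
#
#   ds = Dataset.from_dict({"col_1": ["0"], "col_2": [0], "col_3": [0.0]})
#   ds.to_sql("dataset", "sqlite:///tmp.db")
#   round_trip = Dataset.from_sql("dataset", "sqlite:///tmp.db")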
| 560 | 0 |
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
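
# Why the two prints above hold: p is prime, so Fermat's little theorem gives
# b**(p-2) ≡ b**-1 (mod p), turning modular division into multiplication.
inv_b = binary_exponentiation(b, p - 2, p)
assert (b * inv_b) % p == 1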
| 709 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums by rotating the head element recursively."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums using in-place swaps and backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
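
# Quick equivalence check (illustrative): both strategies enumerate the same
# set of permutations, just in a different order.
assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))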
| 519 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
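
# Context for the score above (a sketch, not part of the test): model(...).loss
# is the mean cross-entropy per label token, so multiplying by the number of
# label tokens recovers the total (negative) log-likelihood that mesh-tf reports:
#
#   mean_nll = F.cross_entropy(logits.transpose(1, 2), labels)
#   total_log_likelihood = -(labels.shape[-1] * mean_nll.item())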
| 23 | '''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCamelCase( _A : Any , _A : List[str]=() , _A : List[str]=None , _A : Dict="no" , _A : List[str]="29500" ):
'''simple docstring'''
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[str] = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCAmelCase__ : List[str] = True
elif "IPython" in sys.modules:
UpperCAmelCase__ : List[Any] = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCAmelCase__ : Tuple = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , _A ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args)
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
            os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
        function(*args)
def debug_launcher(function, args=(), num_processes=2):
    '''simple docstring'''
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.01", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
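
# Typical (illustrative) use from a notebook cell — the training function must
# build its own Accelerator, and nothing CUDA-related may run before launching:
#
#   def training_loop():
#       accelerator = Accelerator()
#       ...  # prepare model/optimizer/dataloaders and train
#
#   notebook_launcher(training_loop, args=(), num_processes=2)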
| 614 | 0 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
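
# How these criteria are consumed in practice (sketch per the public
# transformers API, not part of this test file):
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   model.generate(input_ids, stopping_criteria=criteria)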
| 715 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase_ : Tuple = """src/diffusers"""
UpperCAmelCase_ : str = """."""
# This is to make sure the diffusers module imported is the one in the repo.
UpperCAmelCase_ : Any = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCAmelCase_ : Union[str, Any] = spec.loader.load_module()
def _lowerCAmelCase ( _a : Optional[int] , _a : Optional[int] ) -> Tuple:
return line.startswith(_a ) or len(_a ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , _a ) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check if the code commented as a copy in `filename` matches the original;
    return the differences or overwrite the content depending on `overwrite`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 440 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> List[str]: # noqa: E741
_lowerCAmelCase : List[str] = len(_lowerCamelCase )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = [0] * n
_lowerCAmelCase : List[str] = [False] * n
_lowerCAmelCase : List[str] = [False] * n
def dfs(_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : str ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : str = True
_lowerCAmelCase : Optional[int] = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : int = dfs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Tuple = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : Union[str, Any] = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : Optional[Any] = True
else:
_lowerCAmelCase : Dict = min(low[at] ,_lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Any = dfs(_lowerCamelCase ,_lowerCamelCase ,-1 ,_lowerCamelCase )
_lowerCAmelCase : Tuple = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(_lowerCamelCase )
# Adjacency list of graph
_a : List[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
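
# Optional cross-check (assumes networkx is installed, so kept as comments):
# a reference implementation agrees that the cut vertices of `data` are
# 2, 3 and 5 — exactly what compute_ap prints above.
#
#   import networkx as nx
#   g = nx.Graph((u, v) for u, nbrs in data.items() for v in nbrs)
#   assert sorted(nx.articulation_points(g)) == [2, 3, 5]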
| 213 | """simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))


import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 213 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS (edge weights restricted to 0 or 1)."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # zero-weight edges go to the front of the deque, unit-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
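
# Minimal usage sketch for the class above (kept as comments): zero-weight
# edges are "free", so the 0-1 BFS finds a path of total weight 1 here.
#
#   g = AdjacencyList(4)
#   g.add_edge(0, 1, 0)
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 0)
#   assert g.get_shortest_path(0, 3) == 1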
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa]
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
for i in range(A__ , low + middle ):
comp_and_swap(A__ , A__ , i + middle , A__ )
bitonic_merge(A__ , A__ , A__ , A__ )
bitonic_merge(A__ , low + middle , A__ , A__ )
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
bitonic_sort(A__ , A__ , A__ , 1 )
bitonic_sort(A__ , low + middle , A__ , 0 )
bitonic_merge(A__ , A__ , A__ , A__ )
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
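
# Standalone check (illustrative, no stdin needed): the classic bitonic
# network assumes the input length is a power of two.
_example = [12, 42, -21, 1, 2, 35, 23, 99]
bitonic_sort(_example, 0, len(_example), 1)
assert _example == sorted(_example)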
| 80 | 1 |
from math import isclose, sqrt


def next_point(point_x, point_y, incoming_gradient):
    """Reflect the beam at (point_x, point_y) and return the next bounce point and gradient."""
    # normal_gradient = gradient of the normal line at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # s2 = sin(2 * alpha), c2 = cos(2 * alpha), alpha = angle of the normal
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
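
# Sanity sketch: next_point keeps every bounce on the ellipse 4x^2 + y^2 = 100
# (the mirrored cell boundary), up to floating-point error.
_x, _y, _m = 1.4, -9.6, (10.1 - (-9.6)) / (0.0 - 1.4)
for _ in range(5):
    _x, _y, _m = next_point(_x, _y, _m)
    assert abs(4 * _x * _x + _y * _y - 100) < 1e-6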
| 424 | from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page replacement cache: evicts the least recently referred key first."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
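
# Design note (a sketch of mine, not part of the original): deque.remove()
# makes refer() O(n) in cache size; an OrderedDict gives O(1) recency updates
# with the same eviction order.
from collections import OrderedDict


class LRUCacheOD:
    def __init__(self, n: int) -> None:
        self.capacity = n if n else sys.maxsize
        self.store: OrderedDict = OrderedDict()

    def refer(self, x) -> None:
        if x in self.store:
            self.store.move_to_end(x)       # O(1), replaces deque.remove()
        elif len(self.store) == self.capacity:
            self.store.popitem(last=False)  # evict the least recently used key
        self.store[x] = None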
| 424 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
"""simple docstring"""
__A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__A = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=UpperCamelCase_ , return_tensors="""tf""" )
# forward pass
__A = model(**UpperCamelCase_ )
# verify the logits
__A = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
__A = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCamelCase_ , atol=1e-4 ) )
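# Hedged usage sketch (not part of the test classes above): end-to-end image
# classification with a pretrained TF ResNet. The checkpoint id below is an
# assumption based on the archive list the tests index into.
def _example_resnet_inference(image):
    import tensorflow as tf
    from transformers import AutoImageProcessor, TFResNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")  # assumed checkpoint
    model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits  # shape (1, 1000): ImageNet class scores
    return int(tf.math.argmax(logits, axis=-1)[0])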
| 720 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE = "AutoImageProcessor"
SCREAMING_SNAKE_CASE = "AutoTokenizer"
def __init__( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : str ):
"""simple docstring"""
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
__A = self.image_processor
def __call__( self : Optional[Any] , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None , **UpperCamelCase_ : int ):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__A = self.tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
if images is not None:
__A = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
if text is not None and images is not None:
__A = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase_ ) , tensor_type=UpperCamelCase_ )
def lowerCAmelCase_ ( self : List[str] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : List[str] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase_ ( self : Any , *UpperCamelCase_ : int , **UpperCamelCase_ : Tuple ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"]
| 199 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase : int = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
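# Hedged sketch of what the _LazyModule indirection above provides: importing
# the package stays cheap, and the framework-specific submodules load only on
# first attribute access (comments only; the names match the import structure
# declared above and assume the torch extra is installed).
#
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormConfig
#   config = RobertaPreLayerNormConfig()      # configuration only, torch not imported yet
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormModel
#   model = RobertaPreLayerNormModel(config)  # modeling submodule loaded lazily here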
| 641 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _lowerCAmelCase ( UpperCamelCase__: Any ) -> Optional[int]:
"""simple docstring"""
A = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
A = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
A = 4
A = 48
A = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
A = [6, 6, 6, 6]
A = 60
A = [6, 6, 6, 6]
A = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
A = 4
A = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
A = 1
A = 1
A = 1_26
A = 7
A = 2_55.0
A = """"""
return config
def _lowerCAmelCase ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Dict ) -> Optional[Any]:
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
A = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
A = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
A = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
A = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
A = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
A = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
A = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
A = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
A = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
A = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
A = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
A = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
A = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
A = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
A = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
A = """layernorm.weight"""
if name == "norm.bias":
A = """layernorm.bias"""
if "conv_first" in name:
A = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
A = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
A = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
A = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
A = name.replace("""upsample.2""" , """upsample.convolution_1""" )
A = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
A = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
A = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
A = """swin2sr.""" + name
return name
def _lowerCAmelCase ( UpperCamelCase__: Optional[int] , UpperCamelCase__: Union[str, Any] ) -> Tuple:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
A = key.split(""".""" )
A = int(key_split[1] )
A = int(key_split[4] )
A = config.embed_dim
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[dim : dim * 2]
A = val[-dim:]
pass
else:
A = val
return orig_state_dict
def _lowerCAmelCase ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[Any] ) -> str:
"""simple docstring"""
A = get_config(UpperCamelCase__ )
A = SwinaSRForImageSuperResolution(UpperCamelCase__ )
model.eval()
A = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" )
A = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
A , A = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(UpperCamelCase__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'Unexpected key {key} in state_dict' )
# verify values
A = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
A = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("""RGB""" )
A = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
A = 1_26 if """Jpeg""" in checkpoint_url else 2_56
A = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
A = transforms(UpperCamelCase__ ).unsqueeze(0 )
if config.num_channels == 1:
A = pixel_values[:, 0, :, :].unsqueeze(1 )
A = model(UpperCamelCase__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
A = torch.Size([1, 3, 5_12, 5_12] )
A = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
A = torch.Size([1, 3, 10_24, 10_24] )
A = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
A = torch.Size([1, 3, 10_24, 10_24] )
A = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
A = torch.Size([1, 3, 5_12, 5_12] )
A = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
A = torch.Size([1, 3, 10_24, 10_24] )
A = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , UpperCamelCase__ , atol=1e-3 )
print("""Looks ok!""" )
A = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
A = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
model.push_to_hub(f'caidas/{model_name}' )
processor.push_to_hub(f'caidas/{model_name}' )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
_lowercase : Tuple = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
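# Hedged invocation sketch for the converter above (the script file name and
# output path are placeholders):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#       --push_to_hub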
| 641 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self : List[Any] , __A : Optional[Any] , __A : List[str]=1_3 , __A : List[str]=7 , __A : Any=True , __A : Tuple=True , __A : Any=False , __A : int=True , __A : str=9_9 , __A : Dict=3_2 , __A : Optional[int]=5 , __A : List[Any]=4 , __A : Tuple=3_7 , __A : Any="gelu" , __A : Tuple=0.1 , __A : Dict=0.1 , __A : Tuple=5_1_2 , __A : List[Any]=1_6 , __A : Union[str, Any]=2 , __A : Tuple=0.02 , __A : Optional[Any]=3 , __A : Tuple=4 , __A : Dict=None , ):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = scope
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[Any] ):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : int , __A : List[Any] , __A : Optional[Any] , __A : int , __A : Union[str, Any] , __A : Any , __A : Optional[int] ):
__UpperCamelCase = DistilBertModel(config=__A )
model.to(__A )
model.eval()
__UpperCamelCase = model(__A , __A )
__UpperCamelCase = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Tuple , __A : Optional[int] , __A : List[Any] , __A : str , __A : Tuple , __A : Union[str, Any] , __A : Optional[Any] ):
__UpperCamelCase = DistilBertForMaskedLM(config=__A )
model.to(__A )
model.eval()
__UpperCamelCase = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __A : Union[str, Any] , __A : str , __A : str , __A : Dict , __A : List[Any] , __A : str ):
__UpperCamelCase = DistilBertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
__UpperCamelCase = model(
__A , attention_mask=__A , start_positions=__A , end_positions=__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __A : Optional[int] , __A : List[Any] , __A : Dict , __A : Union[str, Any] , __A : int , __A : Any ):
__UpperCamelCase = self.num_labels
__UpperCamelCase = DistilBertForSequenceClassification(__A )
model.to(__A )
model.eval()
__UpperCamelCase = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : str , __A : Union[str, Any] , __A : Optional[int] , __A : int , __A : List[str] , __A : Tuple , __A : List[Any] ):
__UpperCamelCase = self.num_labels
__UpperCamelCase = DistilBertForTokenClassification(config=__A )
model.to(__A )
model.eval()
__UpperCamelCase = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : Tuple , __A : int , __A : str , __A : str , __A : Optional[int] , __A : List[Any] , __A : Any ):
__UpperCamelCase = self.num_choices
__UpperCamelCase = DistilBertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
__UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = model(
__A , attention_mask=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : Tuple ):
__UpperCamelCase = self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
SCREAMING_SNAKE_CASE_ : Dict =(
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : str =True
SCREAMING_SNAKE_CASE_ : Optional[Any] =True
SCREAMING_SNAKE_CASE_ : Optional[Any] =True
SCREAMING_SNAKE_CASE_ : Tuple =True
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = DistilBertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__A , dim=3_7 )
def _lowerCamelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__A )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__A )
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__A )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__A )
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__A )
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__A )
@slow
def _lowerCamelCase ( self : List[Any] ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = DistilBertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def _lowerCamelCase ( self : str ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__UpperCamelCase = True
__UpperCamelCase = model_class(config=__A )
__UpperCamelCase = self._prepare_for_class(__A , __A )
__UpperCamelCase = torch.jit.trace(
__A , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , 'traced_model.pt' ) )
__UpperCamelCase = torch.jit.load(os.path.join(__A , 'traced_model.pt' ) , map_location=__A )
loaded(inputs_dict['input_ids'].to(__A ) , inputs_dict['attention_mask'].to(__A ) )
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = DistilBertModel.from_pretrained('distilbert-base-uncased' )
__UpperCamelCase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCamelCase = model(__A , attention_mask=__A )[0]
__UpperCamelCase = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __A )
__UpperCamelCase = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
| 708 |
'''simple docstring'''
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """simple docstring"""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
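# Worked examples for convert_speed, shown as comments (values follow from the
# two conversion charts above; the function rounds to 3 decimals):
#
#   convert_speed(100, "km/h", "m/s")  ->  100 * 1.0 * 0.277777778  = 27.778
#   convert_speed(50, "mph", "km/h")   ->  50 * 1.609344 * 1.0      = 80.467
#   convert_speed(100, "km/h", "mph")  ->  100 * 1.0 * 0.621371192  = 62.137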
| 434 | 0 |
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """simple docstring"""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            # (note: reassigning the loop variable has no effect in a for loop)
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
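# Worked example (sketch): the elimination above zeroes the second row of
# [[1, 2], [2, 4]] (multiplier 2), leaving one pivot, so the rank is 1; a
# matrix with both pivots intact keeps rank 2.
#
#   rank_of_matrix([[1.0, 2.0], [2.0, 4.0]])  ->  1
#   rank_of_matrix([[1.0, 0.0], [0.0, 1.0]])  ->  2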
| 68 |
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """simple docstring"""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n: int = 1000) -> int:
    """simple docstring"""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
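# Worked example (sketch): for n = 3 the generator yields
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 (ten terms shorter than 3 digits) before
# reaching 144, so the loop increments answer to 11 and solution(3) returns 12;
# indeed 144 is the 12th Fibonacci number.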
| 644 | 0 |
"""simple docstring"""
import math
def solution(n: int = 1_00) -> int:
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 711 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class __snake_case ( _lowercase):
snake_case__ : Optional[int] = "canine"
def __init__( self : List[Any] , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : List[Any]=1_2 , __lowerCAmelCase : Optional[int]=1_2 , __lowerCAmelCase : str=3_0_7_2 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Any=1_6_3_8_4 , __lowerCAmelCase : str=1_6 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Dict=1E-12 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Dict=0xe0_00 , __lowerCAmelCase : Optional[int]=0xe0_01 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Optional[int]=8 , __lowerCAmelCase : List[Any]=1_6_3_8_4 , __lowerCAmelCase : Optional[Any]=1_2_8 , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : int = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : int = layer_norm_eps
# Character config:
_lowerCamelCase : Dict = downsampling_rate
_lowerCamelCase : str = upsampling_kernel_size
_lowerCamelCase : List[Any] = num_hash_functions
_lowerCamelCase : Dict = num_hash_buckets
_lowerCamelCase : Optional[Any] = local_transformer_stride
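# Hedged usage sketch: called with no arguments the config above reproduces the
# google/canine-s defaults; any field can be overridden for a custom variant
# (comments only, since the class name in the listing is obfuscated — in
# transformers this is CanineConfig).
#
#   config = CanineConfig()                      # google/canine-s defaults
#   small = CanineConfig(num_hidden_layers=6)    # smaller custom encoder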
| 598 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowercase : Optional[int] = logging.getLogger(__name__)
@dataclass
class a__ ( UpperCamelCase_ ):
_A = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
_A = field(default=UpperCamelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
_A = field(
default=UpperCamelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
_A = field(default=UpperCamelCase_ , metadata={"help": "whether to use adafactor"} )
_A = field(
default=UpperCamelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
_A = field(
default=UpperCamelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
_A = field(default=UpperCamelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
_A = field(
default=UpperCamelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
_A = field(
default="linear" , metadata={"help": f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 423 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__lowerCamelCase : str = logging.get_logger("""transformers.models.speecht5""")
__lowerCamelCase : int = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
__lowerCamelCase : str = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
__lowerCamelCase : List[str] = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
__lowerCamelCase : List[str] = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
__lowerCamelCase : List[str] = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
__lowerCamelCase : List[Any] = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
__lowerCamelCase : Dict = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
__lowerCamelCase : Optional[Any] = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
__lowerCamelCase : List[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__lowerCamelCase : Optional[Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__lowerCamelCase : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__lowerCamelCase : str = []
__lowerCamelCase : List[Any] = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
__lowerCamelCase : Dict = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
__lowerCamelCase : Union[str, Any] = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
__lowerCamelCase : Union[str, Any] = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : List[str] = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : int = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_g":
snake_case__ : List[str] = value
elif weight_type == "weight_v":
snake_case__ : Any = value
elif weight_type == "bias":
snake_case__ : str = value
elif weight_type == "running_mean":
snake_case__ : Tuple = value
elif weight_type == "running_var":
snake_case__ : Any = value
elif weight_type == "num_batches_tracked":
snake_case__ : Dict = value
else:
snake_case__ : List[Any] = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Optional[Any] ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case__, snake_case__ : str = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : int , snake_case_ : int ):
snake_case__ : List[str] = []
if task == "s2t":
snake_case__ : Optional[Any] = hf_model.speechta.encoder.prenet.feature_encoder
snake_case__ : Dict = MAPPING_S2T
snake_case__ : Union[str, Any] = IGNORE_KEYS_S2T
elif task == "t2s":
snake_case__ : int = None
snake_case__ : List[Any] = MAPPING_T2S
snake_case__ : List[str] = IGNORE_KEYS_T2S
elif task == "s2s":
snake_case__ : List[str] = hf_model.speechta.encoder.prenet.feature_encoder
snake_case__ : Optional[Any] = MAPPING_S2S
snake_case__ : str = IGNORE_KEYS_S2S
else:
raise ValueError(F'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(snake_case_ , snake_case_ ):
logger.info(F'''{name} was ignored''' )
continue
snake_case__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : int = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
snake_case__, snake_case__ : int = key.split(".*." )
if prefix in name and suffix in name:
snake_case__ : Optional[Any] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
snake_case__ : List[str] = True
if "*" in mapped_key:
snake_case__ : Optional[Any] = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : Dict = "weight_g"
elif "weight_v" in name:
snake_case__ : int = "weight_v"
elif "bias" in name:
snake_case__ : Any = "bias"
elif "weight" in name:
snake_case__ : List[str] = "weight"
elif "running_mean" in name:
snake_case__ : Union[str, Any] = "running_mean"
elif "running_var" in name:
snake_case__ : Optional[Any] = "running_var"
elif "num_batches_tracked" in name:
snake_case__ : Any = "num_batches_tracked"
else:
snake_case__ : Dict = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Optional[int] ):
snake_case__ : Optional[Any] = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : List[Any] = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : Tuple = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : str=None , snake_case_ : int=None , snake_case_ : str=None , ):
if config_path is not None:
snake_case__ : List[Any] = SpeechTaConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Optional[Any] = SpeechTaConfig()
if task == "s2t":
snake_case__ : Optional[Any] = config.max_text_positions
snake_case__ : str = SpeechTaForSpeechToText(snake_case_ )
elif task == "t2s":
snake_case__ : Optional[int] = 1876
snake_case__ : Optional[int] = 600
snake_case__ : List[Any] = config.max_speech_positions
snake_case__ : Tuple = SpeechTaForTextToSpeech(snake_case_ )
elif task == "s2s":
snake_case__ : Any = 1876
snake_case__ : List[str] = config.max_speech_positions
snake_case__ : Dict = SpeechTaForSpeechToSpeech(snake_case_ )
else:
raise ValueError(F'''Unknown task name: {task}''' )
if vocab_path:
snake_case__ : Optional[int] = SpeechTaTokenizer(snake_case_ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
snake_case__ : int = AddedToken("<mask>" , lstrip=snake_case_ , rstrip=snake_case_ )
snake_case__ : Dict = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
snake_case__ : int = SpeechTaFeatureExtractor()
snake_case__ : Optional[Any] = SpeechTaProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(snake_case_ )
snake_case__ : Optional[Any] = torch.load(snake_case_ )
recursively_load_weights(fairseq_checkpoint["model"] , snake_case_ , snake_case_ )
model.save_pretrained(snake_case_ )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(snake_case_ )
model.push_to_hub(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
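# Hedged invocation sketch for the converter above (script file name, checkpoint
# and vocab paths are placeholders; --push_to_hub takes a hub repo id per the
# argument definition above):
#
#   python convert_speecht5_checkpoint.py \
#       --task s2t \
#       --checkpoint_path ./speecht5_asr.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5-asr \
#       --push_to_hub username/speecht5-asr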
| 297 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 712 | import argparse
import json
from tqdm import tqdm
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__UpperCAmelCase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__UpperCAmelCase , help='''where to store parsed gold_data_path file''' , )
_lowerCamelCase = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
_lowerCamelCase = json.load(__UpperCAmelCase )
for dpr_record in tqdm(__UpperCAmelCase ):
_lowerCamelCase = dpr_record['''question''']
_lowerCamelCase = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(__UpperCAmelCase ) + '''\n''' )
if __name__ == "__main__":
main() | 638 | 0 |
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
def _SCREAMING_SNAKE_CASE ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 108 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__a: Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ):
'''simple docstring'''
_lowerCamelCase = None
_lowerCamelCase = "utf-8"
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = True # deprecated
_lowerCamelCase = None # deprecated
_lowerCamelCase = 10 << 20 # 10MB
_lowerCamelCase = None
class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
_lowerCamelCase = JsonConfig
def lowerCamelCase ( self : Any ) -> Any:
"""simple docstring"""
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
_UpperCAmelCase = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase ( self : List[Any] , lowerCamelCase : Optional[int] ) -> Dict:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_UpperCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCamelCase , (str, list, tuple) ):
_UpperCAmelCase = data_files
if isinstance(lowerCamelCase , lowerCamelCase ):
_UpperCAmelCase = [files]
_UpperCAmelCase = [dl_manager.iter_files(lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_UpperCAmelCase = []
for split_name, files in data_files.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
_UpperCAmelCase = [files]
_UpperCAmelCase = [dl_manager.iter_files(lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCamelCase , gen_kwargs={"""files""": files} ) )
return splits
def lowerCamelCase ( self : Tuple , lowerCamelCase : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
_UpperCAmelCase = self.config.features.arrow_schema.field(lowerCamelCase ).type
_UpperCAmelCase = pa_table.append_column(lowerCamelCase , pa.array([None] * len(lowerCamelCase ) , type=lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
_UpperCAmelCase = table_cast(lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Optional[int] ) -> str:
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
_UpperCAmelCase = json.load(lowerCamelCase )
# We keep only the field we are interested in
_UpperCAmelCase = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(lowerCamelCase , (list, tuple) ):
_UpperCAmelCase = set().union(*[row.keys() for row in dataset] )
_UpperCAmelCase = {col: [row.get(lowerCamelCase ) for row in dataset] for col in keys}
else:
_UpperCAmelCase = dataset
_UpperCAmelCase = pa.Table.from_pydict(lowerCamelCase )
yield file_idx, self._cast_table(lowerCamelCase )
# If the file has one json object per line
else:
with open(lowerCamelCase , """rb""" ) as f:
_UpperCAmelCase = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
_UpperCAmelCase = max(self.config.chunksize // 32 , 16 << 10 )
_UpperCAmelCase = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
_UpperCAmelCase = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
_UpperCAmelCase = batch.decode(self.config.encoding , errors=lowerCamelCase ).encode("""utf-8""" )
try:
while True:
try:
_UpperCAmelCase = paj.read_json(
io.BytesIO(lowerCamelCase ) , read_options=paj.ReadOptions(block_size=lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(lowerCamelCase )
or block_size > len(lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(lowerCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
_UpperCAmelCase = json.load(lowerCamelCase )
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(lowerCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(lowerCamelCase , lowerCamelCase ): # list is the only sequence type supported in JSON
try:
_UpperCAmelCase = set().union(*[row.keys() for row in dataset] )
_UpperCAmelCase = {col: [row.get(lowerCamelCase ) for row in dataset] for col in keys}
_UpperCAmelCase = pa.Table.from_pydict(lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(lowerCamelCase )}: {e}""" )
raise ValueError(f"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(lowerCamelCase )
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(lowerCamelCase )}: {e}""" )
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCamelCase )
batch_idx += 1 | 108 | 1 |
import os
from distutils.util import strtobool
def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : Optional[Any] ):
'''simple docstring'''
for e in env_keys:
A_ : Optional[int] = int(os.environ.get(a__ ,-1 ) )
if val >= 0:
return val
return default
def UpperCamelCase ( __lowercase : Any ,__lowercase : Dict=False ):
'''simple docstring'''
A_ : Tuple = os.environ.get(a__ ,str(a__ ) )
return strtobool(a__ ) == 1 # As its name indicates `strtobool` actually returns an int...
def UpperCamelCase ( __lowercase : List[Any] ,__lowercase : Optional[Any]="no" ):
'''simple docstring'''
A_ : Any = os.environ.get(a__ ,str(a__ ) )
return value
| 715 | from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase ( ):
'''simple docstring'''
A_ , A_ : Any = 9, 14 # noqa: F841
A_ : str = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A_ : List[Any] = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
A_ : Tuple = mst(__lowercase )
A_ : Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
A_ : List[Any] = tuple(answer[:2] )
A_ : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
| 70 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Any ='''glpn'''
def __init__( self :List[Any], snake_case :List[str]=3, snake_case :Any=4, snake_case :List[str]=[2, 2, 2, 2], snake_case :Union[str, Any]=[8, 4, 2, 1], snake_case :Dict=[32, 64, 160, 256], snake_case :int=[7, 3, 3, 3], snake_case :Optional[Any]=[4, 2, 2, 2], snake_case :Optional[int]=[1, 2, 5, 8], snake_case :Optional[int]=[4, 4, 4, 4], snake_case :List[Any]="gelu", snake_case :List[Any]=0.0, snake_case :Any=0.0, snake_case :Dict=0.0_2, snake_case :List[Any]=0.1, snake_case :Any=1e-6, snake_case :Optional[Any]=64, snake_case :str=10, snake_case :int=-1, **snake_case :Tuple, ):
"""simple docstring"""
super().__init__(**snake_case)
_lowercase =num_channels
_lowercase =num_encoder_blocks
_lowercase =depths
_lowercase =sr_ratios
_lowercase =hidden_sizes
_lowercase =patch_sizes
_lowercase =strides
_lowercase =mlp_ratios
_lowercase =num_attention_heads
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =initializer_range
_lowercase =drop_path_rate
_lowercase =layer_norm_eps
_lowercase =decoder_hidden_size
_lowercase =max_depth
_lowercase =head_in_index
| 181 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : int ='''summarization'''
__lowerCAmelCase : str =['''loss''']
__lowerCAmelCase : Dict =ROUGE_KEYS
__lowerCAmelCase : str ='''rouge2'''
def __init__( self :Dict, snake_case :List[Any], **snake_case :Tuple):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
_lowercase =False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
super().__init__(snake_case, num_labels=snake_case, mode=self.mode, **snake_case)
use_task_specific_params(self.model, 'summarization')
save_git_info(self.hparams.output_dir)
_lowercase =Path(self.output_dir) / 'metrics.json'
_lowercase =Path(self.output_dir) / 'hparams.pkl'
pickle_save(self.hparams, self.hparams_save_path)
_lowercase =0
_lowercase =defaultdict(snake_case)
_lowercase =self.config.model_type
_lowercase =self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
_lowercase ={
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_lowercase ={
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
_lowercase ={k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_lowercase ={
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model)
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
_lowercase =get_git_info()['repo_sha']
_lowercase =hparams.num_workers
_lowercase =None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, snake_case):
_lowercase =self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_lowercase =self.decoder_start_token_id
_lowercase =(
SeqaSeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeqaSeqDataset
)
_lowercase =False
_lowercase =self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_lowercase =self.hparams.eval_max_gen_length
else:
_lowercase =self.model.config.max_length
_lowercase =self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCamelCase__ ( self :str, snake_case :Dict[str, torch.Tensor]):
"""simple docstring"""
_lowercase ={
k: self.tokenizer.batch_decode(v.tolist()) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(snake_case, Path(self.output_dir) / 'text_batch.json')
save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / 'tok_batch.json')
_lowercase =True
return readable_batch
def UpperCamelCase__ ( self :Dict, snake_case :List[str], **snake_case :List[Any]):
"""simple docstring"""
return self.model(snake_case, **snake_case)
def UpperCamelCase__ ( self :Any, snake_case :List[int]):
"""simple docstring"""
_lowercase =self.tokenizer.batch_decode(
snake_case, skip_special_tokens=snake_case, clean_up_tokenization_spaces=snake_case)
return lmap(str.strip, snake_case)
def UpperCamelCase__ ( self :Union[str, Any], snake_case :dict):
"""simple docstring"""
_lowercase =self.tokenizer.pad_token_id
_lowercase , _lowercase =batch['input_ids'], batch['attention_mask']
_lowercase =batch['labels']
if isinstance(self.model, snake_case):
_lowercase =self.model._shift_right(snake_case)
else:
_lowercase =shift_tokens_right(snake_case, snake_case)
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_lowercase =decoder_input_ids
self.save_readable_batch(snake_case)
_lowercase =self(snake_case, attention_mask=snake_case, decoder_input_ids=snake_case, use_cache=snake_case)
_lowercase =outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_lowercase =nn.CrossEntropyLoss(ignore_index=snake_case)
assert lm_logits.shape[-1] == self.vocab_size
_lowercase =ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
else:
_lowercase =nn.functional.log_softmax(snake_case, dim=-1)
_lowercase , _lowercase =label_smoothed_nll_loss(
snake_case, snake_case, self.hparams.label_smoothing, ignore_index=snake_case)
return (loss,)
@property
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
return self.tokenizer.pad_token_id
def UpperCamelCase__ ( self :Tuple, snake_case :Dict, snake_case :str):
"""simple docstring"""
_lowercase =self._step(snake_case)
_lowercase =dict(zip(self.loss_names, snake_case))
# tokens per batch
_lowercase =batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum()
_lowercase =batch['input_ids'].shape[0]
_lowercase =batch['input_ids'].eq(self.pad).sum()
_lowercase =batch['input_ids'].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCamelCase__ ( self :List[Any], snake_case :Dict, snake_case :List[Any]):
"""simple docstring"""
return self._generative_step(snake_case)
def UpperCamelCase__ ( self :List[Any], snake_case :List[str], snake_case :str="val"):
"""simple docstring"""
self.step_count += 1
_lowercase ={k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
_lowercase =losses['loss']
_lowercase ={
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
_lowercase =(
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_lowercase =torch.tensor(snake_case).type_as(snake_case)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(snake_case)
_lowercase ={f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
_lowercase =self.step_count
self.metrics[prefix].append(snake_case) # callback writes this to self.metrics_save_path
_lowercase =flatten_list([x['preds'] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def UpperCamelCase__ ( self :Any, snake_case :str, snake_case :List[str]):
"""simple docstring"""
return calculate_rouge(snake_case, snake_case)
def UpperCamelCase__ ( self :str, snake_case :dict):
"""simple docstring"""
_lowercase =time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_lowercase =self.model.generate(
batch['input_ids'], attention_mask=batch['attention_mask'], use_cache=snake_case, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, )
_lowercase =(time.time() - ta) / batch['input_ids'].shape[0]
_lowercase =self.ids_to_clean_text(snake_case)
_lowercase =self.ids_to_clean_text(batch['labels'])
_lowercase =self._step(snake_case)
_lowercase =dict(zip(self.loss_names, snake_case))
_lowercase =self.calc_generative_metrics(snake_case, snake_case)
_lowercase =np.mean(lmap(snake_case, snake_case))
base_metrics.update(gen_time=snake_case, gen_len=snake_case, preds=snake_case, target=snake_case, **snake_case)
return base_metrics
def UpperCamelCase__ ( self :Optional[Any], snake_case :str, snake_case :Optional[int]):
"""simple docstring"""
return self._generative_step(snake_case)
def UpperCamelCase__ ( self :Union[str, Any], snake_case :List[str]):
"""simple docstring"""
return self.validation_epoch_end(snake_case, prefix='test')
def UpperCamelCase__ ( self :List[Any], snake_case :str):
"""simple docstring"""
_lowercase =self.n_obs[type_path]
_lowercase =self.target_lens[type_path]
_lowercase =self.dataset_class(
self.tokenizer, type_path=snake_case, n_obs=snake_case, max_target_length=snake_case, **self.dataset_kwargs, )
return dataset
def UpperCamelCase__ ( self :str, snake_case :str, snake_case :int, snake_case :bool = False):
"""simple docstring"""
_lowercase =self.get_dataset(snake_case)
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_lowercase =dataset.make_sortish_sampler(snake_case, distributed=self.hparams.gpus > 1)
return DataLoader(
snake_case, batch_size=snake_case, collate_fn=dataset.collate_fn, shuffle=snake_case, num_workers=self.num_workers, sampler=snake_case, )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_lowercase =dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1)
return DataLoader(
snake_case, batch_sampler=snake_case, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
else:
return DataLoader(
snake_case, batch_size=snake_case, collate_fn=dataset.collate_fn, shuffle=snake_case, num_workers=self.num_workers, sampler=snake_case, )
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
_lowercase =self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=snake_case)
return dataloader
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)
def UpperCamelCase__ ( self :int):
"""simple docstring"""
return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size)
@staticmethod
def UpperCamelCase__ ( snake_case :Dict, snake_case :List[str]):
"""simple docstring"""
BaseTransformer.add_model_specific_args(snake_case, snake_case)
add_generic_args(snake_case, snake_case)
parser.add_argument(
'--max_source_length', default=1024, type=snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--max_target_length', default=56, type=snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--val_max_target_length', default=142, type=snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--test_max_target_length', default=142, type=snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument('--freeze_encoder', action='store_true')
parser.add_argument('--freeze_embeds', action='store_true')
parser.add_argument('--sortish_sampler', action='store_true', default=snake_case)
parser.add_argument('--overwrite_output_dir', action='store_true', default=snake_case)
parser.add_argument('--max_tokens_per_batch', type=snake_case, default=snake_case)
parser.add_argument('--logger_name', type=snake_case, choices=['default', 'wandb', 'wandb_shared'], default='default')
parser.add_argument('--n_train', type=snake_case, default=-1, required=snake_case, help='# examples. -1 means use all.')
parser.add_argument('--n_val', type=snake_case, default=500, required=snake_case, help='# examples. -1 means use all.')
parser.add_argument('--n_test', type=snake_case, default=-1, required=snake_case, help='# examples. -1 means use all.')
parser.add_argument(
'--task', type=snake_case, default='summarization', required=snake_case, help='# examples. -1 means use all.')
parser.add_argument('--label_smoothing', type=snake_case, default=0.0, required=snake_case)
parser.add_argument('--src_lang', type=snake_case, default='', required=snake_case)
parser.add_argument('--tgt_lang', type=snake_case, default='', required=snake_case)
parser.add_argument('--eval_beams', type=snake_case, default=snake_case, required=snake_case)
parser.add_argument(
'--val_metric', type=snake_case, default=snake_case, required=snake_case, choices=['bleu', 'rouge2', 'loss', None])
parser.add_argument('--eval_max_gen_length', type=snake_case, default=snake_case, help='never generate more than n tokens')
parser.add_argument('--save_top_k', type=snake_case, default=1, required=snake_case, help='How many checkpoints to save')
parser.add_argument(
'--early_stopping_patience', type=snake_case, default=-1, required=snake_case, help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
), )
return parser
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] ='''translation'''
__lowerCAmelCase : Any =['''loss''']
__lowerCAmelCase : Optional[int] =['''bleu''']
__lowerCAmelCase : Any ='''bleu'''
def __init__( self :str, snake_case :Union[str, Any], **snake_case :Any):
"""simple docstring"""
super().__init__(snake_case, **snake_case)
_lowercase =hparams.src_lang
_lowercase =hparams.tgt_lang
def UpperCamelCase__ ( self :Dict, snake_case :Union[str, Any], snake_case :Any):
"""simple docstring"""
return calculate_bleu(snake_case, snake_case)
def _snake_case (_snake_case : Dict , _snake_case : int=None) -> SummarizationModule:
Path(args.output_dir).mkdir(exist_ok=_snake_case)
check_output_dir(_snake_case , expected_items=3)
if model is None:
if "summarization" in args.task:
_lowercase =SummarizationModule(_snake_case)
else:
_lowercase =TranslationModule(_snake_case)
_lowercase =Path(args.data_dir).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir).startswith('/tmp')
or str(args.output_dir).startswith('/var')
):
_lowercase =True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_lowercase =os.environ.get('WANDB_PROJECT' , _snake_case)
_lowercase =WandbLogger(name=model.output_dir.name , project=_snake_case)
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_lowercase =WandbLogger(name=model.output_dir.name , project=f'''hf_{dataset}''')
if args.early_stopping_patience >= 0:
_lowercase =get_early_stopping_callback(model.val_metric , args.early_stopping_patience)
else:
_lowercase =False
_lowercase =args.val_metric == 'loss'
_lowercase =generic_train(
_snake_case , _snake_case , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _snake_case) , early_stopping_callback=_snake_case , logger=_snake_case , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl')
if not args.do_predict:
return model
_lowercase =''
_lowercase =sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt') , recursive=_snake_case))
if checkpoints:
_lowercase =checkpoints[-1]
_lowercase =checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams)
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE = pl.Trainer.add_argparse_args(parser)
_SCREAMING_SNAKE_CASE = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_SCREAMING_SNAKE_CASE = parser.parse_args()
main(args)
| 181 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : str="cls" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
__UpperCAmelCase : Tuple = project_dim
__UpperCAmelCase : Any = pooler_fn
__UpperCAmelCase : Tuple = learn_encoder
__UpperCAmelCase : Optional[int] = use_attention_mask
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [R'''pooler''', R'''logit_scale''']
SCREAMING_SNAKE_CASE = [R'''position_ids''', R'''predictions.decoder.bias''']
SCREAMING_SNAKE_CASE = '''roberta'''
SCREAMING_SNAKE_CASE = RobertaSeriesConfig
def __init__( self : Tuple , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
super().__init__(UpperCAmelCase_ )
__UpperCAmelCase : List[str] = XLMRobertaModel(UpperCAmelCase_ )
__UpperCAmelCase : Any = nn.Linear(config.hidden_size , config.project_dim )
__UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase_ , "has_pre_transformation" , UpperCAmelCase_ )
if self.has_pre_transformation:
__UpperCAmelCase : List[str] = nn.Linear(config.hidden_size , config.project_dim )
__UpperCAmelCase : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : List[Any] = self.base_model(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , output_attentions=UpperCAmelCase_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCAmelCase_ , )
if self.has_pre_transformation:
__UpperCAmelCase : Tuple = outputs["hidden_states"][-2]
__UpperCAmelCase : int = self.pre_LN(UpperCAmelCase_ )
__UpperCAmelCase : Any = self.transformation_pre(UpperCAmelCase_ )
return TransformationModelOutput(
projection_state=UpperCAmelCase_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__UpperCAmelCase : Dict = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=UpperCAmelCase_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 701 |
'''simple docstring'''
lowerCAmelCase__ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase__ : int = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase__ : List[Any] = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
assert len(str(_UpperCAmelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
__UpperCAmelCase : Optional[int] = year // 100
__UpperCAmelCase : int = (5 * (century % 4) + 2) % 7
__UpperCAmelCase : Optional[Any] = year % 100
__UpperCAmelCase : int = centurian % 12
__UpperCAmelCase : Optional[Any] = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
__UpperCAmelCase : Union[str, Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
else DOOMSDAY_LEAP[month - 1]
)
__UpperCAmelCase : Optional[Any] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def _a ( _lowerCamelCase , _lowerCamelCase ) -> bool:
"""simple docstring"""
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _a ( _lowerCamelCase ) -> list[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = []
__snake_case : Dict = 11
__snake_case : List[Any] = int("""1""" + """0""" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(F'''{num}/{den}''' )
den += 1
num += 1
__snake_case : str = 10
return solutions
def _a ( _lowerCamelCase = 2 ) -> int:
"""simple docstring"""
__snake_case : List[Any] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
__snake_case : List[Any] = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 26 |
'''simple docstring'''
import sys
__UpperCamelCase : List[str] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: str = N ) -> int:
"""simple docstring"""
__a = -sys.maxsize - 1
for i in range(len(SCREAMING_SNAKE_CASE__ ) - 12 ):
__a = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
__a = product
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""") | 448 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 4_2
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline | 715 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Tuple:
# Initialise PyTorch model
__lowerCAmelCase : Any = AlbertConfig.from_json_file(__snake_case )
print(F"""Building PyTorch model from configuration: {config}""" )
__lowerCAmelCase : str = AlbertForPreTraining(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_albert(__snake_case ,__snake_case ,__snake_case )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() ,__snake_case )
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__snake_case : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path) | 615 | 0 |
"""simple docstring"""
from PIL import Image
def snake_case ( _a: List[Any] )-> Image:
'''simple docstring'''
lowerCamelCase__ = image.size
lowerCamelCase__ = 0
lowerCamelCase__ = image.load()
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
lowerCamelCase__ = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(_lowerCAmelCase ):
for i in range(_lowerCAmelCase ):
lowerCamelCase__ = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
_snake_case = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 510 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'vocab_file': 'vocab.json'}
_UpperCamelCase = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_UpperCamelCase = {'mgp-str': 27}
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ =VOCAB_FILES_NAMES
a_ =PRETRAINED_VOCAB_FILES_MAP
a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any] , _a : Optional[Any] , _a : Any="[GO]" , _a : Optional[Any]="[GO]" , _a : Optional[int]="[s]" , _a : Optional[int]="[GO]" , **_a : Union[str, Any] ) -> Optional[int]:
super().__init__(
unk_token=_a , bos_token=_a , eos_token=_a , pad_token=_a , **_a , )
with open(_a , encoding='utf-8' ) as vocab_handle:
__lowerCamelCase : List[str] = json.load(_a )
__lowerCamelCase : Tuple = {v: k for k, v in self.vocab.items()}
@property
def _lowercase ( self : Union[str, Any] ) -> List[str]:
return len(self.vocab )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
return dict(self.vocab , **self.added_tokens_encoder )
def _lowercase ( self : Dict , _a : Tuple ) -> int:
__lowerCamelCase : Optional[Any] = []
for s in text:
char_tokens.extend(_a )
return char_tokens
def _lowercase ( self : Optional[Any] , _a : Any ) -> Dict:
return self.vocab.get(_a , self.vocab.get(self.unk_token ) )
def _lowercase ( self : List[Any] , _a : Dict ) -> Optional[int]:
return self.decoder.get(_a )
def _lowercase ( self : Union[str, Any] , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error('Vocabulary path ({}) should be a directory'.format(_a ) )
return
__lowerCamelCase : List[Any] = os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(_a , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_a , ensure_ascii=_a ) + '\n' )
return (vocab_file,)
| 459 | 0 |
def __lowerCAmelCase ( snake_case : int , snake_case : int ) -> List[str]:
while a != 0:
__lowerCamelCase: str = b % a, a
return b
def __lowerCAmelCase ( snake_case : int , snake_case : int ) -> Optional[int]:
if gcd(_lowerCamelCase , _lowerCamelCase ) != 1:
__lowerCamelCase: List[str] = f'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(_lowerCamelCase )
__lowerCamelCase: int = 1, 0, a
__lowerCamelCase: Tuple = 0, 1, m
while va != 0:
__lowerCamelCase: int = ua // va
__lowerCamelCase: Dict = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 704 |
import math
def __lowerCAmelCase ( snake_case : int ) -> bool:
__lowerCamelCase: Dict = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(snake_case )
def __lowerCAmelCase ( snake_case : float = 1 / 12345 ) -> int:
__lowerCamelCase: str = 0
__lowerCamelCase: Optional[Any] = 0
__lowerCamelCase: str = 3
while True:
__lowerCamelCase: int = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(snake_case ):
__lowerCamelCase: Union[str, Any] = int(snake_case )
total_partitions += 1
if check_partition_perfect(snake_case ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(snake_case )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 189 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    """Cross-attention 2D downsampling block: resnet + transformer pairs, with an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            # Only the first resnet sees the block's input channel count.
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            # Collect intermediate states so the matching up block can use them as skips.
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
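
# Illustrative usage sketch (added; not part of the original module). The NHWC
# feature-map shape, the time-embedding width and the text-encoder state shape
# below are assumptions chosen for the example, not values fixed by this block.
def _example_cross_attn_down_block():
    import jax

    block = FlaxCrossAttnDownBlock2D(in_channels=32, out_channels=64, num_attention_heads=4)
    hidden_states = jnp.zeros((1, 16, 16, 32))    # NHWC feature map (assumed shape)
    temb = jnp.zeros((1, 1280))                   # time embedding (assumed width)
    encoder_states = jnp.zeros((1, 77, 768))      # text-encoder states (assumed shape)
    variables = block.init(jax.random.PRNGKey(0), hidden_states, temb, encoder_states)
    return block.apply(variables, hidden_states, temb, encoder_states)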

class FlaxDownBlock2D(nn.Module):
    """Plain 2D downsampling block: a stack of resnets with an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states

class FlaxCrossAttnUpBlock2D(nn.Module):
    """Cross-attention 2D upsampling block: resnet + transformer pairs over concatenated skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states: consume the skip connections in reverse order
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
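
# Note (added for clarity; not in the original file): the up blocks consume
# `res_hidden_states_tuple` from the back, so the skip pushed last by the
# matching down block is concatenated first, on the channel (last) axis of the
# NHWC feature map, before each resnet. The channel bookkeeping in `setup`
# mirrors this: the final layer's skip carries `in_channels` channels, earlier
# ones `out_channels`.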

class FlaxUpBlock2D(nn.Module):
    """Plain 2D upsampling block: a stack of resnets over concatenated skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states

class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    """UNet bottleneck block: a resnet followed by alternating transformer/resnet layers."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
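
# Illustrative usage sketch (added; not part of the original module). The
# bottleneck shape, embedding width and context shape are assumptions made
# only for this example.
def _example_mid_block():
    import jax

    mid = FlaxUNetMidBlock2DCrossAttn(in_channels=64, num_attention_heads=8)
    hidden_states = jnp.zeros((1, 8, 8, 64))      # NHWC bottleneck features (assumed)
    temb = jnp.zeros((1, 1280))                   # time embedding (assumed width)
    encoder_states = jnp.zeros((1, 77, 768))      # text-encoder states (assumed)
    variables = mid.init(jax.random.PRNGKey(0), hidden_states, temb, encoder_states)
    return mid.apply(variables, hidden_states, temb, encoder_states)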
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))

class TFCvtModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12,
        is_training=True, use_labels=True, num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # each stage shrinks the spatial dims according to its patch embedding
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
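
# Illustrative driver (added; not part of the original test file): exercising
# the model tester outside of unittest discovery. The small batch/image sizes
# are assumptions to keep the example cheap; it still builds a real TF model.
def _example_run_model_tester():
    tester = TFCvtModelTester(unittest.TestCase(), batch_size=2, image_size=32)
    config, pixel_values, labels = tester.prepare_config_and_inputs()
    tester.create_and_check_model(config, pixel_values, labels)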
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def snake_case ( self : Tuple ):
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def snake_case ( self : str ):
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def snake_case ( self : Dict ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def snake_case ( self : List[Any] ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def snake_case ( self : Tuple ):
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def snake_case ( self : Tuple ):
        policy = tf.keras.mixed_precision.Policy('''mixed_float16''' )
        tf.keras.mixed_precision.set_global_policy(policy )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )

            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head( self ):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )

        # forward pass
        outputs = model(**inputs )

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 166 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
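

# --- Illustrative usage sketch (added for clarity; not part of the original file).
# The toy parameter tree below is an assumption chosen so that every leaf path
# matches one of the rules above, keeping the completeness assert satisfied.
if __name__ == "__main__":
    _toy_params = {
        "transformer": {
            "wte": {"embedding": [0.0]},
            "ln_f": {"scale": [1.0], "bias": [0.0]},
        }
    }
    # Prints a frozen dict mapping each leaf to a PartitionSpec (or None).
    print(set_partitions(_toy_params))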
| 30 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30 | 1 |
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type , format_type: Optional[str] , aliases: Optional[List[str]] = None , ) -> None:
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(unavailable_error: Exception , format_type: Optional[str] , aliases: Optional[List[str]] = None ) -> None:
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str] ) -> Optional[str]:
    '''simple docstring'''
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str] , **format_kwargs ) -> Formatter:
    '''simple docstring'''
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
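

# --- Illustrative usage sketch (added; not in the original module). The numpy
# formatter is always registered above, so this resolves the "np" alias first
# and then instantiates the formatter class.
if __name__ == "__main__":
    print(type(get_formatter("np")).__name__)  # -> NumpyFormatter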
| 303 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class MraModelTest ( ModelTesterMixin , unittest.TestCase):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''MRA does not output attentions''' )
def _snake_case ( self : Optional[Any] ):
return
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
@slow
    def test_inference_no_head( self ):
        model = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )

        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_masked_lm( self ):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )

        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def _snake_case ( self : Optional[int] ):
snake_case_ : Any = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
snake_case_ : Optional[int] = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
snake_case_ : Tuple = model(lowercase_ )[0]
snake_case_ : List[str] = 50265
snake_case_ : Optional[Any] = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , lowercase_ )
snake_case_ : Tuple = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
| 123 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython() )

    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , _a ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function , distributed_type="TPU" )
        print(f'''Launching a training on {num_processes} TPU cores.''' )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
        function(*args )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr="127.0.01" , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type="MULTI_GPU" )

                print(f'''Launching training on {num_processes} GPUs.''' )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS." )
            elif torch.cuda.is_available():
                print("Launching training on one GPU." )
            else:
                print("Launching training on CPU." )
            function(*args )
def debug_launcher(function , args=() , num_processes=2 ):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr="127.0.01" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method="fork" )
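

# --- Illustrative usage sketch (added; not part of the original file). The
# training function below is a made-up placeholder; notebook_launcher normally
# receives your real training loop from a notebook cell.
def _toy_training_loop():
    print("training step")


if __name__ == "__main__":
    # With num_processes=1 this falls through to the single CPU/GPU/MPS branch.
    notebook_launcher(_toy_training_loop, num_processes=1)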
| 114 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowercase : Optional[Any] = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowercase : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowercase : Union[str, Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the gene list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population) > N_POPULATION:
break
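

# --- Illustrative usage sketch (added; not in the original script). A short
# target keeps a smoke test of the operators above fast; the call signature
# matches ``basic``.
def _demo_run() -> None:
    gen, pop, best = basic("hello", list(" abcdefghijklmnopqrstuvwxyz!"), debug=False)
    print(f"converged to {best!r} after {gen} generations ({pop} candidates)")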
if __name__ == "__main__":
    target_str = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    genes_list = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
)
| 114 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def make_batched(videos ) -> List[List[ImageInput]]:
    '''simple docstring'''
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos

    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]

    elif is_valid_image(videos ):
        return [[videos]]

    raise ValueError(F"""Could not make batched video from {videos}""" )


class A_ ( BaseImageProcessor ):

    model_input_names = ['pixel_values']
    def __init__( self ,do_resize: bool = True ,size: Dict[str, int] = None ,resample: PILImageResampling = PILImageResampling.BILINEAR ,do_center_crop: bool = True ,crop_size: Dict[str, int] = None ,do_rescale: bool = True ,rescale_factor: Union[int, float] = 1 / 255 ,do_normalize: bool = True ,image_mean: Optional[Union[float, List[float]]] = None ,image_std: Optional[Union[float, List[float]]] = None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size ,default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size ,param_name="crop_size" )

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self ,image: np.ndarray ,size: Dict[str, int] ,resample: PILImageResampling = PILImageResampling.BILINEAR ,data_format: Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        '''simple docstring'''
        size = get_size_dict(size ,default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image ,size["shortest_edge"] ,default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
        return resize(image ,size=output_size ,resample=resample ,data_format=data_format ,**kwargs )
    def center_crop( self ,image: np.ndarray ,size: Dict[str, int] ,data_format: Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(image ,size=(size["height"], size["width"]) ,data_format=data_format ,**kwargs )

    def rescale( self ,image: np.ndarray ,scale: Union[int, float] ,data_format: Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        '''simple docstring'''
        return rescale(image ,scale=scale ,data_format=data_format ,**kwargs )

    def normalize( self ,image: np.ndarray ,mean: Union[float, List[float]] ,std: Union[float, List[float]] ,data_format: Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        '''simple docstring'''
        return normalize(image ,mean=mean ,std=std ,data_format=data_format ,**kwargs )
    def _preprocess_image( self ,image: ImageInput ,do_resize: bool = None ,size: Dict[str, int] = None ,resample: PILImageResampling = None ,do_center_crop: bool = None ,crop_size: Dict[str, int] = None ,do_rescale: bool = None ,rescale_factor: float = None ,do_normalize: bool = None ,image_mean: Optional[Union[float, List[float]]] = None ,image_std: Optional[Union[float, List[float]]] = None ,data_format: Optional[ChannelDimension] = ChannelDimension.FIRST ,):
        '''simple docstring'''
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        # All transformations expect numpy arrays.
        image = to_numpy_array(image )

        if do_resize:
            image = self.resize(image=image ,size=size ,resample=resample )

        if do_center_crop:
            image = self.center_crop(image ,size=crop_size )

        if do_rescale:
            image = self.rescale(image=image ,scale=rescale_factor )

        if do_normalize:
            image = self.normalize(image=image ,mean=image_mean ,std=image_std )

        image = to_channel_dimension_format(image ,data_format )
        return image
    def preprocess( self ,videos: ImageInput ,do_resize: bool = None ,size: Dict[str, int] = None ,resample: PILImageResampling = None ,do_center_crop: bool = None ,crop_size: Dict[str, int] = None ,do_rescale: bool = None ,rescale_factor: float = None ,do_normalize: bool = None ,image_mean: Optional[Union[float, List[float]]] = None ,image_std: Optional[Union[float, List[float]]] = None ,return_tensors: Optional[Union[str, TensorType]] = None ,data_format: ChannelDimension = ChannelDimension.FIRST ,**kwargs ,):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size ,default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size ,param_name="crop_size" )

        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        videos = make_batched(videos )

        videos = [
            [
                self._preprocess_image(
                    image=img ,do_resize=do_resize ,size=size ,resample=resample ,do_center_crop=do_center_crop ,crop_size=crop_size ,do_rescale=do_rescale ,rescale_factor=rescale_factor ,do_normalize=do_normalize ,image_mean=image_mean ,image_std=image_std ,data_format=data_format ,)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data ,tensor_type=return_tensors ) | 46 |
'''simple docstring'''
import random
def rabin_miller(num: int ) -> bool:
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int ) -> bool:
    """simple docstring"""
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime(keysize: int = 1024 ) -> int:
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print(("""Prime number:""", num))
    print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 379 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
@property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=3,out_channels=3,down_block_types=("DownBlock2D", "AttnDownBlock2D"),up_block_types=("AttnUpBlock2D", "UpBlock2D"),)
return model
def lowerCamelCase_ ( self : List[Any] ):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet,scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2,generator=generator,output_type="numpy" ).images

        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2,generator=generator,output_type="numpy",return_dict=False )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : int = "google/ncsnpp-celebahq-256"
_lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = KarrasVeScheduler()
_lowerCamelCase : Dict = KarrasVePipeline(unet=__A,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Optional[Any] = torch.manual_seed(0 )
_lowerCamelCase : Any = pipe(num_inference_steps=2_0,generator=__A,output_type="numpy" ).images
_lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_lowerCamelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 11 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self,vocab_file=None,merges_file=None,unk_token="<|endoftext|>",bos_token="<|endoftext|>",eos_token="<|endoftext|>",add_prefix_space=False,trim_offsets=True,**kwargs,):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,merges=merges_file,add_prefix_space=add_prefix_space,trim_offsets=trim_offsets,),bos_token=bos_token,eos_token=eos_token,unk_token=unk_token,**kwargs,)
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self,token_ids_0,token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self,token_ids_0: List[int],token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0] | 11 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config( PretrainedConfig ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """speech_to_text_2"""
_SCREAMING_SNAKE_CASE = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 430 |
'''simple docstring'''
from __future__ import annotations
def shear_stress( stress , tangential_force , area , ) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
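
    # --- Illustrative calls (added; the numeric values are made up for
    # demonstration). Exactly one argument must be zero; the function solves
    # for that quantity.
    print(shear_stress(stress=25, tangential_force=100, area=0))    # ('area', 4.0)
    print(shear_stress(stress=0, tangential_force=1600, area=200))  # ('stress', 8.0)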
| 430 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __lowerCAmelCase ( ) -> List[Any]:
'''simple docstring'''
raise RuntimeError('CUDA out of memory.' )
class ModelForTest( nn.Module ):
def __init__( self : Any ):
"""simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward( self , x ):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs , arga = mock_training_loop_function('hello' )
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass

        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self : int ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self : int ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function(1_2_8 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(batch_size ):
            raise ValueError('Oops, we had an error!' )

        with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
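

# --- Illustrative usage sketch (added; not part of the original tests). Typical
# training code wraps its inner loop so the decorator retries after a CUDA OOM,
# halving the batch size each attempt. The loop body here is a placeholder.
@find_executable_batch_size(starting_batch_size=256)
def _example_training_loop(batch_size: int):
    # ... build dataloaders and run the epoch with `batch_size` here ...
    return batch_size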
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )['hidden_states'][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )['hidden_states'][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=3_7)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base(self):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 1_0], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(4_2)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(4_2)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
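# Toy illustration (added for clarity; a hypothetical helper, NOT the model
# code): "linear" RoPE scaling compresses every position index by `factor`,
# while "dynamic" scaling leaves inputs at or below the trained maximum
# untouched -- which is why the short-input outputs above match only in the
# dynamic case.
def toy_effective_positions(seq_len, max_positions, factor, mode):
    positions = list(range(seq_len))
    if mode == 'linear':
        return [p / factor for p in positions]
    if mode == 'dynamic' and seq_len <= max_positions:
        return positions  # unchanged: dynamic scaling has not kicked in yet
    # beyond max_positions, dynamic (NTK-style) scaling stretches the rotary
    # period instead; modelled here crudely as a rescale of the indices
    return [p * max_positions / seq_len for p in positions]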
| 673 | 1 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = 'huggingface/label-files'
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = 'kinetics400-id2label.json'
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = 'something-something-v2-id2label.json'
        else:
            raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.')
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2_048
    elif "huge" in model_name:
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2_560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace('encoder.', '')
    if "cls_token" in name:
        name = name.replace('cls_token', 'videomae.embeddings.cls_token')
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed', 'videomae.embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'videomae.embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'videomae.embeddings.norm')
    if "decoder.blocks" in name:
        name = name.replace('decoder.blocks', 'decoder.decoder_layers')
    if "blocks" in name:
        name = name.replace('blocks', 'videomae.encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name and "bias" not in name:
        name = name.replace('attn', 'attention.self')
    if "attn" in name:
        name = name.replace('attn', 'attention.attention')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "decoder_embed" in name:
        name = name.replace('decoder_embed', 'decoder.decoder_embed')
    if "decoder_norm" in name:
        name = name.replace('decoder_norm', 'decoder.decoder_norm')
    if "decoder_pred" in name:
        name = name.replace('decoder_pred', 'decoder.decoder_pred')
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.weight', 'videomae.layernorm.weight')
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.bias', 'videomae.layernorm.bias')
    if "head" in name and "decoder" not in name:
        name = name.replace('head', 'classifier')

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith('encoder.'):
            key = key.replace('encoder.', '')

        if "qkv" in key:
            key_split = key.split('.')
            if key.startswith('decoder.blocks'):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = 'decoder.decoder_layers.'
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = 'videomae.encoder.layer.'
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
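# A minimal sketch (illustrative, hypothetical names) of the fused-QKV split
# used above: one (3*dim, dim) projection is cut into equal query/key/value
# blocks along the first axis, exactly like val[:dim], val[dim:2*dim], val[-dim:].
def demo_split_qkv(fused, dim):
    query, key, value = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert query.shape == key.shape == value.shape
    return query, key, value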
def prepare_video():
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset')
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = 'pytorch_model.bin'
    gdown.cached_download(checkpoint_url, output, quiet=True)
    files = torch.load(output, map_location='cpu')
    if "model" in files:
        state_dict = files['model']
    else:
        state_dict = files['module']
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors='pt')

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos', filename='bool_masked_pos.pt')
        inputs['bool_masked_pos'] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(F'Model name not supported. Should be one of {model_names}')
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1E-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1E-4)
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print('Pushing to the hub...' )
        model.push_to_hub(model_name, organization='nielsr')
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__lowerCAmelCase = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 536 |
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 2_0_0:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 449 | 0 |
import unittest
from transformers import DonutProcessor
_snake_case = 'naver-clova-ix/donut-base'
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(_snake_case)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
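# A toy sketch (added for illustration; NOT DonutProcessor's implementation) of
# the flat tag-to-JSON idea the test above exercises: each <s_key>value</s_key>
# span becomes one JSON field. Nested fields need the real token2json parser.
def toy_flat_token2json(sequence):
    import re

    return {m.group(1): m.group(2) for m in re.finditer(r"<s_(\w+)>([^<]*)</s_\1>", sequence)}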
| 706 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
_snake_case = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['cfg'])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['model'])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_snake_case = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 567 | 0 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DebertaVaModel,
            'fill-mask': DebertaVaForMaskedLM,
            'question-answering': DebertaVaForQuestionAnswering,
            'text-classification': DebertaVaForSequenceClassification,
            'token-classification': DebertaVaForTokenClassification,
            'zero-shot': DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge')

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"""{output[:, 1:4, 1:4]}""")
| 20 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 20 | 1 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( vector ) -> np.array:
    # 2 / (1 + e^(-2x)) - 1 is the tanh identity 2*sigmoid(2x) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1
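# Hedged demo (added for illustration): the formula above should agree with
# numpy's own tanh on arbitrary inputs.
_demo = np.array([-1.0, 0.0, 1.0])
assert np.allclose(SCREAMING_SNAKE_CASE__(_demo), np.tanh(_demo))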
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
| 684 | 0 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f'Vertex\tShortest Distance from vertex {src}')
    for i, d in enumerate(distance):
        print(f'{i}\t\t{d}')


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
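# Non-interactive usage sketch (example graph added for illustration):
#
#     edges = [
#         {"src": 0, "dst": 1, "weight": 4},
#         {"src": 0, "dst": 2, "weight": 1},
#         {"src": 2, "dst": 1, "weight": 2},
#     ]
#     bellman_ford(edges, vertex_count=3, edge_count=3, src=0)  # -> [0.0, 3.0, 1.0]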
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 12 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
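# Maps each submodule to its public names; the actual imports are deferred until
# first attribute access via _LazyModule.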
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 12 | 1 |
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection
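    # One pass compares mirrored pairs from the two ends inward, then recurses on
    # each half; full passes repeat until one completes without any swap.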
    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
| 110 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
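# SentencePiece marks word-initial pieces with this underline character.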
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 110 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    '''simple docstring'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}
    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
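    # postprocess resizes the raw depth map back to the input image's (width, height)
    # and also returns a uint8 PIL visualization of it.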
    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
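                # Python's // floors toward negative infinity; adjust so that the
                # division truncates toward zero instead.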
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod() | 436 | 1 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
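# Integration checks: run the Flax UNet on fixed noise inputs and compare output
# slices against values recorded from the PyTorch implementation.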
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        """simple docstring"""
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image
    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision)
        return model, params
    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 7_68), fp16=False):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
            [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
            [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
            [3, 10_00, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
            # fmt: on
        ] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        """simple docstring"""
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states,).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
            [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
            [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
            [3, 10_00, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
            # fmt: on
        ] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        """simple docstring"""
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 10_24), fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states,).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 121 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
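# Task readers for the token-classification example: NER uses the last CoNLL column,
# chunking the second-to-last, and POS reads CoNLL-U files via parse_incr.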
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        """simple docstring"""
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """simple docstring"""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-') or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ')
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace('\n', ''))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O')
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """simple docstring"""
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-') or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0) + '\n'
                writer.write(output_line)
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.', line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        """simple docstring"""
        if path:
            with open(path, 'r') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        """simple docstring"""
        super().__init__(label_idx=-2)
    def get_labels(self, path: str) -> List[str]:
        """simple docstring"""
        if path:
            with open(path, 'r') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """simple docstring"""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'])
                    labels.append(token['upos'])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """simple docstring"""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        """simple docstring"""
        if path:
            with open(path, 'r') as f:
                return f.read().splitlines()
        else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 121 | 1 |
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """simple docstring"""
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 610 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
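# Attend-and-Excite nudges cross-attention toward the prompt tokens given in
# `token_indices` while denoising; the fast tests use a tiny UNet/VAE/CLIP stack.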
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    '''simple docstring'''
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=1_28, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, hidden_act="gelu", projection_dim=5_12, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)
    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)
    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
| 610 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 715 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
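# Fetches the artifacts of a GitHub Actions run, extracts test failures from the
# report zips, and aggregates them per error and per model as markdown tables.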
def get_job_links(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    '''simple docstring'''
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem.")
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    '''simple docstring'''
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    '''simple docstring'''
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    '''simple docstring'''
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    '''simple docstring'''
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    '''simple docstring'''
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    '''simple docstring'''
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
    counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2)
fp.write(sa)
| 339 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__(self, other):
        return self.key < other.key
    def __repr__(self):
        return self.id
    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)
    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    '''simple docstring'''
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    '''simple docstring'''
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
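# Same traversal, but pops the minimum-key vertex from a binary heap instead of a
# linear scan over the queue.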
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    '''simple docstring'''
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 553 | 1 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
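# Versions are "major.minor.patch" strings; @total_ordering derives the remaining
# comparison operators from __eq__ and __lt__ below.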
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
    def __post_init__(self):
        """simple docstring"""
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
def __repr__( self ) -> Optional[Any]:
"""simple docstring"""
return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
    def tuple(self):
"""simple docstring"""
return self.major, self.minor, self.patch
    def _validate_operand(self, other):
        """simple docstring"""
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'{other} (type {type(other)}) cannot be compared to version.')
    def __eq__(self, other):
        """simple docstring"""
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__(self, other):
        """simple docstring"""
        other = self._validate_operand(other)
        return self.tuple < other.tuple
    def __hash__(self):
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
    def from_dict(cls, dic):
        """simple docstring"""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})
    def _to_yaml_string(self) -> str:
        """simple docstring"""
        return self.version_str
def _str_to_version_tuple(version_str):
    '''simple docstring'''
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.')
    return tuple(int(v) for v in [res.group('major'), res.group('minor'), res.group('patch')])
def _version_tuple_to_str(version_tuple):
    '''simple docstring'''
    return ".".join(str(v) for v in version_tuple)
| 3 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 3 | 1 |
from __future__ import annotations
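# Trial division: divide out each factor while it divides n; any remainder above 1 is prime.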
def prime_factors(n: int) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }), reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'], )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 164 | 1 |
'''simple docstring'''
import string
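# Brute-force Caesar decryption: try all 26 shifts and print every candidate plaintext.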
def decrypt(message: str) -> None:
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')
def main() -> None:
    '''simple docstring'''
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 377 |
'''simple docstring'''
import sys
from collections import defaultdict
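# Prim's algorithm backed by a hand-rolled binary min-heap that tracks each
# vertex's position so a key can be decreased in place.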
class Heap:
    def __init__(self):
        """simple docstring"""
        self.node_position = []
    def get_position(self, vertex):
        """simple docstring"""
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        """simple docstring"""
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        """simple docstring"""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp_position = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_position
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        """simple docstring"""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        """simple docstring"""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        """simple docstring"""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 377 | 1 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
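# Built-in potential of a p-n junction: V_bi = (k*T / q) * ln(N_d * N_a / n_i**2);
# dividing by physical_constants["electron volt"][0] converts joules to electron-volts.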
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
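# Verifies that timm backbones loaded via AutoBackbone expose the same channel and
# stage metadata as the native transformers implementation.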
class TimmBackboneModelTester:
    """simple docstring"""
    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=3_2, num_channels=3, use_pretrained_backbone=True, is_training=True, ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
            self.parent.assertEqual(
                result.feature_map[-1].shape,
                (self.batch_size, model.channels[-1], 14, 14),
            )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""")
def snake_case_ ( self):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
def snake_case_ ( self):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""")
def snake_case_ ( self):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""")
def snake_case_ ( self):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
def snake_case_ ( self):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""")
def snake_case_ ( self):
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""")
def snake_case_ ( self):
pass
@unittest.skip("""Safetensors is not supported by timm.""")
def snake_case_ ( self):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def snake_case_ ( self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 155 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 146 |
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
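# Illustrative usage (an assumed example, not part of the original module):
#   get_device_map(12, list(range(2)))
#   -> {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
# assert_device_map(that_map, 12) then passes, since every block appears exactly once.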
| 146 | 1 |
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
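# A minimal usage sketch (subclass name and defaults are assumed for illustration):
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 0.001, "batch_size": 32}
#   RunNamer.shortname({"learning_rate": 0.0001, "batch_size": 32})
#   -> something like "run_lr0.0001": only non-default params are encoded, and
#   parse_repr() inverts the encoding back into a full parameter dict.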
| 491 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification pipeline: scores candidate text labels against an image."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
]
return result | 34 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_base_case(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_passed_arguments(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
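# Typical real-world shape of the decorator under test (a hedged sketch, not part
# of the original suite): decorate your inner training loop and let it halve the
# batch size on CUDA OOM until it fits.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the loop
#   train()  # retries with 64, 32, ... if an OOM is raised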
| 369 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 62 |
def is_pentagonal(n: int) -> bool:
    """A number is pentagonal if (1 + sqrt(1 + 24 * n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 457 | 0 |
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 715 |
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n / 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
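# Small illustrative call (assumed example): binary_exponentiation(3, 5, 7)
# computes 3**5 % 7 = 243 % 7 = 5 in O(log n) multiplications.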
| 407 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
__A = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
__A = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
__A = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
__A = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
__A = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , a_ , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(a_ )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
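# Example invocation using the flags defined above (the script file name is an
# assumption; run whichever file holds this code):
#   python convert_convnext_upernet_to_pytorch.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub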
| 55 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
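# Minimal usage sketch (assumed, mirroring the standard PretrainedConfig pattern):
#   config = Data2VecVisionConfig(image_size=384, use_auxiliary_head=False)
#   config.num_hidden_layers  # -> 12, the default kept above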
| 74 | 0 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
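# Sketch of where this formatter plugs in (assumed example; `with_format` is the
# public entry point in `datasets`):
#   ds = ds.with_format("torch")   # rows are then routed through TorchFormatter
#   ds[0]                          # -> dict of torch.Tensors for one row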
| 640 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursively treats each cell as the top-left corner of a candidate square and
    tracks the best side length seen in largest_square_area[0]."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
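# One more assumed check: for mat = [[0, 1], [1, 1]] every variant above returns 1,
# since no 2x2 all-ones square exists.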
| 640 | 1 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
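# Assumed example of the expected behaviour:
#   gnome_sort([34, 2, 10, -9])  ->  [-9, 2, 10, 34]
# The gnome walks back one step after every swap, so the worst case is O(n**2).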
| 121 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
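# Illustrative note (not part of the original tests): per the expected output
# above, _build_translation_inputs appends [eos (2), __en_XX__ (50003)] to the
# source ids and asks generation to force __java__ (50001) as the first decoder
# token, which is how PLBart encodes the translation direction.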
| 502 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # References to the next nodes, one entry per level this node is part of.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level for a new node: geometric with success probability p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
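# A minimal usage sketch (not part of the original module), exercising the
# public API end to end; the key/value types here are purely illustrative.
def _demo_usage() -> None:
    sl = SkipList[str, int]()
    sl.insert("a", 1)
    sl.insert("b", 2)
    assert sl.find("a") == 1
    sl.delete("a")
    assert sl.find("a") is None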
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_found_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_found_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()
def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 114 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
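# Usage sketch (illustrative, not part of the original file): with the default
# embed_dim=96 and four stages (depths=[2, 2, 6, 2]), the derived hidden_size
# is int(96 * 2 ** 3) == 768:
#
#   config = DonutSwinConfig()
#   assert config.hidden_size == 768
#   assert config.num_layers == 4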
| 114 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
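# Illustrative sketch (not part of the original file) of observing the
# deprecation at call time, using only the standard warnings API:
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       BeitFeatureExtractor()
#       assert any(issubclass(w.category, FutureWarning) for w in caught)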
| 528 | """simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 528 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
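# Illustrative note (not part of the original file): installing the _LazyModule
# into sys.modules means `from transformers.models.albert import AlbertModel`
# only triggers the heavy torch import when the attribute is first accessed,
# while the `if TYPE_CHECKING:` branch keeps static type checkers aware of the
# real symbols.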
| 630 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))

    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
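# Illustrative usage sketch (not part of the original conftest): because of
# pytest_collection_modifyitems above, tests are implicitly marked "unit"
# unless they carry an explicit marker, e.g. (hypothetical test name):
#
#   @pytest.mark.integration
#   def test_load_dataset_from_hub():
#       ...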
| 630 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
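# Illustrative note (not part of the original tests): test_hidden_states_output
# relies on each of the five MobileViT stages halving the spatial resolution,
# so with image_size=32 the feature maps are 16, 8, 4, 2 and 1 pixels wide, and
# the final divisor // 2 == 32 matches the configured output_stride.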
| 576 |
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
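# Illustrative CLI sketch (not part of the original script; the script name is
# assumed). All arguments are positional and optional:
#
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
#
# Note that parse_args bumps an even kernel_size up to the next odd value so
# that get_slice always has a well-defined centre pixel.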
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 508 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
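# Illustrative example (not part of the original script): for layer 0 of a
# classification checkpoint, create_rename_keys yields pairs such as
# ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight"),
# which rename_key below then applies to the timm state dict.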
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
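# Sanity-check sketch (illustrative): the fused timm qkv projection has shape
# (3 * hidden_size, hidden_size), and the three equal slices taken above map
# to query, key and value, in that order.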
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
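# Illustrative invocation (not part of the original script; the file name is
# assumed):
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224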
| 173 | """simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, filename: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        filename = quote(filename)
    return hfh.hf_hub_url(repo_id, filename, repo_type="dataset", revision=revision)
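# Usage sketch (illustrative): resolving a file inside a dataset repo. With
# revision=None, huggingface_hub defaults to the "main" revision, yielding a
# URL of the form https://huggingface.co/datasets/<repo_id>/resolve/main/<filename>:
#
#   url = hf_hub_url("squad", "dataset_infos.json")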
| 173 | 1 |