import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MobileNetV2ForImageClassification,
        MobileNetV2ForSemanticSegmentation,
        MobileNetV2Model,
    )
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor


class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""")
def __magic_name__ ( self : List[str]):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""")
def __magic_name__ ( self : Tuple):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""")
def __magic_name__ ( self : str):
'''simple docstring'''
pass
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_SCREAMING_SNAKE_CASE)
snake_case__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
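

# A quick way to exercise just this module (hedged: the test-file path follows
# the usual transformers layout and is an assumption here, not taken from the dump):
#
#     pytest tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py -k "MobileNetV2ModelTest"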


def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm: gcd(x, y) == gcd(y, x mod y)."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple via lcm(x, y) = (x * y) / gcd(x, y)."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Returns the smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
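
# Example values for the helpers above (a hand-checked sanity note, not part of
# the original script):
#
#     greatest_common_divisor(12, 18) == 6
#     lcm(4, 6) == 12
#     solution(10) == 2520  # smallest number evenly divisible by 1..10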
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes below were obtained with Tesseract 4.1.1
__lowerCAmelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
__lowerCAmelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
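
# Note on `apply_ocr`, summarizing the behaviour exercised above: with
# apply_ocr=True the processor runs Tesseract over the image and returns
# `words` and `boxes` alongside `pixel_values`; with apply_ocr=False only
# `pixel_values` is returned.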
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A : Optional[int] = logging.get_logger(__name__)
A : List[str] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any ="""bit"""
__UpperCAmelCase : Optional[int] =["""preactivation""", """bottleneck"""]
__UpperCAmelCase : List[str] =["""SAME""", """VALID"""]
def __init__( self , __a=3 , __a=64 , __a=[2_56, 5_12, 10_24, 20_48] , __a=[3, 4, 6, 3] , __a="preactivation" , __a="relu" , __a=None , __a=32 , __a=0.0 , __a=False , __a=32 , __a=1 , __a=None , __a=None , **__a , ):
super().__init__(**__a )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__lowerCAmelCase = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
__lowerCAmelCase = num_channels
__lowerCAmelCase = embedding_size
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = layer_type
__lowerCAmelCase = hidden_act
__lowerCAmelCase = global_padding
__lowerCAmelCase = num_groups
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = embedding_dynamic_padding
__lowerCAmelCase = output_stride
__lowerCAmelCase = width_factor
__lowerCAmelCase = ["stem"] + [f"stage{idx}" for idx in range(1 , len(__a ) + 1 )]
__lowerCAmelCase , __lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
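
# A minimal configuration sketch (hedged: the argument values are illustrative,
# not defaults taken from any checkpoint):
#
#     config = BitConfig(layer_type="bottleneck", global_padding="same")
#     config.global_padding  # -> "SAME" (normalized to upper case above)
#     config.stage_names     # -> ["stem", "stage1", "stage2", "stage3", "stage4"]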


import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
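
# A minimal sanity check for `drop_path` (illustrative only; the tensor shape
# and probability are assumptions, not part of the original module):
#
#     x = torch.ones(4, 3, 8, 8)
#     drop_path(x, drop_prob=0.5, training=False)  # identity outside training
#     drop_path(x, drop_prob=0.5, training=True)   # each sample is either all
#                                                  # zeros or rescaled by 1/0.5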


class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with an optional normalization layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with 1 group; input is a tensor of shape [B, C, H, W]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # the token mixer returns the pooled residual
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs


class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
__lowercase : Union[str, Any] =R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__lowercase : Union[str, Any] =R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
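

# A minimal smoke test for the classes above (hedged: it assumes the default
# PoolFormerConfig from configuration_poolformer; the expected shape matches
# _EXPECTED_OUTPUT_SHAPE for sail/poolformer_s12):
#
#     config = PoolFormerConfig()
#     model = PoolFormerModel(config)
#     pixel_values = torch.randn(1, 3, 224, 224)
#     outputs = model(pixel_values)
#     outputs.last_hidden_state.shape  # -> torch.Size([1, 512, 7, 7])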


import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
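

# A minimal usage sketch (hedged: the checkpoint name and image file are
# illustrative assumptions, not part of this module):
#
#     from transformers import CLIPSegProcessor
#     from PIL import Image
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=[Image.open("cat.png")], return_tensors="pt")
#     # -> encoding with "input_ids", "attention_mask" and "pixel_values"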


from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a WMT dataset and save each split as plain-text .source/.target files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
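
# Typical invocation via fire (hedged: the script filename is an assumption):
#
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
#
# which writes {train,val,test}.source / .target files under ./wmt16-ro-en.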


import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
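

# Example (hedged: the arrays are illustrative, not part of the original module):
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("sst-2", preds, labels)  # -> {"acc": 0.75}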


import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
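

# To run only the fast tests above (hedged: the path follows the usual
# diffusers layout and is an assumption here):
#
#     pytest tests/pipelines/shap_e/test_shap_e.py -k "FastTests"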


import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : List[str] ) -> Any:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = ConvNextModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = ConvNextForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ConvNextBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowercase = None
__lowercase = ConvNextBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        return
    @unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
    def test_feed_forward_chunking( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class ConvNextBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self )
| 80 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
a= logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext( path : str ):
    """simple docstring"""
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        f"""Unable to determine file format from file extension {path}. """
        f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def run_command_factory( args ):
    """simple docstring"""
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand( BaseTransformersCLICommand ):
    """simple docstring"""
    def __init__( self , nlp , reader ):
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand( parser ):
        run_parser = parser.add_parser('run' , help='Run a pipeline through the CLI' )
        run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
        run_parser.add_argument('--input' , type=str , help='Path to the file to use for inference' )
        run_parser.add_argument('--output' , type=str , help='Path to the file that will be used post to write results.' )
        run_parser.add_argument('--model' , type=str , help='Name or path to the model to instantiate.' )
        run_parser.add_argument('--config' , type=str , help='Name or path to the model\'s config to instantiate.' )
        run_parser.add_argument(
            '--tokenizer' , type=str , help='Name of the tokenizer to use. (default: same as the model name)' )
        run_parser.add_argument(
            '--column' , type=str , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
        run_parser.add_argument(
            '--format' , type=str , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
        run_parser.add_argument(
            '--device' , type=int , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
        run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
        run_parser.set_defaults(func=run_command_factory )
    def run( self ):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
        else:
            self._reader.save(outputs )
| 287 | '''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
def lowerCAmelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        return "lower newer", "lower newer"
    def test_full_tokenizer( self ):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_padding( self , max_length=1_5 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
    def test_padding_different_model_input_name( self ):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy( OpenAIGPTTokenizationTest ):
    """simple docstring"""
    pass
| 287 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph , v , visited_forward , visited_backward , cst_fwd , cst_bwd , queue , parent , shortest_distance , ):
    """simple docstring"""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
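# Bidirectional Dijkstra: one search expands from the source over the forward graph while
# a second expands from the destination over the reversed graph; once the frontiers meet,
# the best meeting point gives the shortest path distance.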
def bidirectional_dij( source , destination , graph_forward , graph_backward ):
    """simple docstring"""
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
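# Minimal usage sketch with the graphs above (hypothetical call):
# bidirectional_dij("E", "F", graph_fwd, graph_bwd) -> 3, via E -> G (cost 2) -> F (cost 1).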
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , ):
        """simple docstring"""
        size = size if size is not None else {"height": 1_8, "width": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "clusters" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
        self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
    def test_image_processor_to_json_string( self ):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
    def test_image_processor_to_json_file( self ):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "image_processor.json" )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    def test_image_processor_from_and_save_pretrained( self ):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
@unittest.skip("ImageGPT requires clusters at initialization" )
def lowerCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
pass
def prepare_images():
    '''simple docstring'''
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
    image1 = Image.open(dataset[4]["file"] )
    image2 = Image.open(dataset[5]["file"] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_image( self ):
        """simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
        expected_ids = [3_0_6, 1_9_1, 1_9_1]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
        expected_ids = [3_0_3, 1_3, 1_3]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
| 283 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class Swinv2Config( PretrainedConfig ):
    model_type = '''swinv2'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
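# Minimal usage sketch (hypothetical; assumes transformers is installed):
# config = Swinv2Config()  # default 4-stage model, so config.hidden_size == 96 * 2 ** 3 == 768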
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clap'] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure['feature_extraction_clap'] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 429 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir( files , tmp_path_factory ):
    dataset_infos_dir = tmp_path_factory.mktemp("""dset_infos_dir""" )
    if "full:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""""" )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
            f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload( tmp_path , dataset_info : DatasetInfo ):
    dataset_info_dir = str(tmp_path )
    dataset_info.write_to_directory(dataset_info_dir )
    reloaded = DatasetInfo.from_directory(dataset_info_dir )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir , """dataset_info.json""" ) )
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload( tmp_path , dataset_infos_dict : DatasetInfosDict ):
    dataset_infos_dir = str(tmp_path )
    dataset_infos_dict.write_to_directory(dataset_infos_dir )
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir , """README.md""" ) )
| 272 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class GPTNeoXJapaneseConfig( PretrainedConfig ):
    model_type = '''gpt_neox_japanese'''
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_2 , num_attention_heads=3_2 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=1_0_0_0_0 , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=3_1_9_9_6 , eos_token_id=3_1_9_9_9 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 272 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_A = logging.get_logger(__name__)
class FlavaFeatureExtractor( FlavaImageProcessor ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 706 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class RobertaPreLayerNormConfig( PretrainedConfig ):
    model_type = 'roberta-prelayernorm'
    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 325 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 389 |
"""simple docstring"""
def is_even( number: int ) -> bool:
    '''simple docstring'''
    return number & 1 == 0
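# `number & 1` isolates the least-significant bit, which is 0 for every even integer,
# so the bitwise test above is equivalent to `number % 2 == 0`.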
if __name__ == "__main__":
import doctest
doctest.testmod()
| 389 | 1 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin , PipelineLatentTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_control_guidance_switch( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
'''simple docstring'''
_lowercase : int = self.get_dummy_components()
_lowercase : Any = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(UpperCamelCase_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny( self ):
        '''simple docstring'''
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='np' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
        assert np.abs(expected_image - image ).max() < 9E-2
| 4 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story( story_id : str ) -> dict:
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url ).json()
def hackernews_top_stories( max_stories : int = 10 ) -> list[dict]:
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown( max_stories : int = 10 ) -> str:
    stories = hackernews_top_stories(max_stories )
    return "\n".join('* [{title}]({url})'.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4 | 1 |
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
 | 504 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__( self , input_array : numpy.ndarray , output_array : numpy.ndarray )->None:
        '''simple docstring'''
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
def UpperCAmelCase__ ( self : int )->numpy.ndarray:
'''simple docstring'''
__lowerCAmelCase : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
__lowerCAmelCase : str = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
__lowerCAmelCase : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase__ ( self : int )->None:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
__lowerCAmelCase : Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
__lowerCAmelCase : Dict = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCAmelCase__ ( self : Any , _snake_case : numpy.ndarray , _snake_case : int , _snake_case : bool )->None:
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
__lowerCAmelCase : Tuple = self.feedforward()
self.back_propagation()
if give_loss:
__lowerCAmelCase : List[Any] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
def UpperCAmelCase__ ( self : Optional[int] , _snake_case : numpy.ndarray )->int:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = input_arr
__lowerCAmelCase : str = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
__lowerCAmelCase : List[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
__lowerCAmelCase : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :numpy.ndarray ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :numpy.ndarray ) -> numpy.ndarray:
return (value) * (1 - (value))
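

# Quick sanity check (illustrative addition, not part of the original module):
# sigmoid_derivative expects the already-activated value s = sigmoid(x), because
# d/dx sigmoid(x) = s * (1 - s). At x = 0, s = 0.5 and the slope is 0.25:
#
#   s = sigmoid(numpy.array([0.0]))
#   assert abs(float(sigmoid_derivative(s)) - 0.25) < 1e-12
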
def example() -> int:
    """Train on all 3-bit inputs, then predict the class of (1, 1, 1)."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example() | 504 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
A = logging.get_logger(__name__)
class a__ ( __A ):
def __init__( self : Dict , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[str]):
"""simple docstring"""
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__)
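

# Illustrative usage (hypothetical call; any kwargs accepted by
# MobileViTImageProcessor pass straight through):
#
#   extractor = MobileViTFeatureExtractor()  # emits a FutureWarning, then
#   # behaves exactly like MobileViTImageProcessor
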
| 709 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = ConsistencyModelPipeline
lowercase_ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase_ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowercase_ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def a_ ( self : Any , UpperCamelCase_ : int=False):
"""simple docstring"""
if class_cond:
__UpperCAmelCase : List[Any] = self.dummy_cond_unet
else:
__UpperCAmelCase : Optional[int] = self.dummy_uncond_unet
# Default to CM multistep sampler
__UpperCAmelCase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
}
return components
def a_ ( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=0):
"""simple docstring"""
if str(UpperCamelCase_).startswith("mps"):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_)
else:
__UpperCAmelCase : Optional[Any] = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_)
__UpperCAmelCase : List[Any] = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
__UpperCAmelCase : str = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : Any = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : str = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Optional[int] = self.get_dummy_components(class_cond=UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : str = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : int = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Optional[int] = self.get_dummy_components()
__UpperCAmelCase : Optional[Any] = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : List[str] = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : int = None
__UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[str] = self.get_dummy_components(class_cond=UpperCamelCase_)
__UpperCAmelCase : Tuple = ConsistencyModelPipeline(**UpperCamelCase_)
__UpperCAmelCase : int = pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Tuple = pipe(**UpperCamelCase_).images
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
__UpperCAmelCase : Dict = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[int]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : int="cpu" , UpperCamelCase_ : Any=torch.floataa , UpperCamelCase_ : List[str]=(1, 3, 64, 64)):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = torch.manual_seed(UpperCamelCase_)
__UpperCAmelCase : int = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
__UpperCAmelCase : int = self.get_fixed_latents(seed=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ , shape=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = latents
return inputs
def a_ ( self : Union[str, Any] , UpperCamelCase_ : int=0 , UpperCamelCase_ : Tuple="cpu" , UpperCamelCase_ : Tuple=torch.floataa , UpperCamelCase_ : Optional[Any]=(1, 3, 64, 64)):
"""simple docstring"""
if type(UpperCamelCase_) == str:
__UpperCAmelCase : Union[str, Any] = torch.device(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_)
return latents
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Dict = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : Dict = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Dict = self.get_inputs()
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : List[str] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : Any = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : int = self.get_inputs()
__UpperCAmelCase : str = 1
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
@require_torch_a
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : int = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : int = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_ , torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Tuple = self.get_inputs(get_fixed_latents=UpperCamelCase_ , device=UpperCamelCase_)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCamelCase_ , enable_math=UpperCamelCase_ , enable_mem_efficient=UpperCamelCase_):
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
__UpperCAmelCase : Optional[int] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@require_torch_a
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : Tuple = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
__UpperCAmelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCAmelCase : List[Any] = ConsistencyModelPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_)
pipe.to(torch_device=UpperCamelCase_ , torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = self.get_inputs(get_fixed_latents=UpperCamelCase_ , device=UpperCamelCase_)
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : Any = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCamelCase_ , enable_math=UpperCamelCase_ , enable_mem_efficient=UpperCamelCase_):
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : int = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[str] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 487 | 0 |
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
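

# Example (illustrative): floats_list((2, 3)) yields a 2 x 3 nested list of floats
# in [0, scale), drawn from the module-level `global_rng` unless `rng` is given.
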
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 212 |
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 212 | 1 |
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
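

# Illustrative note: Slack incoming webhooks accept a JSON body of the form
# {"text": "..."}; any status code other than 200 is treated as a failure above.
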
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 431 |
# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    """Print n rows of left-padded stars, forming the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond: n rows of shrinking stars."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the full diamond, or a hint when n is not positive."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
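

# Example (illustrative): pretty_print(3) prints the 3-row upper pyramid
#
#     *
#    * *
#   * * *
#
# followed by its mirror image from reverse_floyd(3).
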
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
_lowercase = 1
while K:
_lowercase = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
_lowercase = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 431 | 1 |
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no transformers counterpart."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free linear layer whose weight is tied to the embedding table."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
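

# Illustrative check (not part of the conversion script): the returned layer shares
# storage with the embedding, so the LM head stays tied to the token embeddings.
#
#   emb = nn.Embedding(10, 4)
#   lm_head = make_linear_from_emb(emb)
#   assert lm_head.weight.data_ptr() == emb.weight.data_ptr()
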
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 392 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    """Zip the three parallel lists into a list of Things."""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Take items in descending key_func order while they still fit in max_cost."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
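

# Example (illustrative): greedily maximize value under a weight budget of 60.
#
#   foods = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 60, 10])
#   taken, total = greedy(foods, 60, Things.get_value)  # picks Pizza, total == 100
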
def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 392 | 1 |
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
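

# Illustrative input shape (inferred from the reads above, not a documented
# schema): each benchmark maps metric names to new/old/diff entries, e.g.
#
#   {
#     "benchmarks/text_classification.json": {
#       "accuracy": {"new": 0.91, "old": 0.89, "diff": 0.02}
#     }
#   }
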
if __name__ == "__main__":
lowercase__ = sys.argv[1]
lowercase__ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 695 |
"""Create all contiguous character n-grams of a sentence."""


def create_ngram(sentence: str, ngram_size: int) -> list:
    """Return every substring of length ngram_size, sliding one character at a time."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
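

# Example (illustrative): character-level trigrams.
#
#   create_ngram("I am an NLPer", 3)[:3]  # -> ['I a', ' am', 'am ']
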
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 1 |
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Collect every non-trivial href found in an anchor tag."""
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc
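

# Example (illustrative): only the registrable part of the host is kept.
#
#   get_domain_name("https://a.b.c.d/e/f?g=h,i=j#k")      # -> 'c.d'
#   get_sub_domain_name("https://a.b.c.d/e/f?g=h,i=j#k")  # -> 'a.b.c.d'
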
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Scrape a page's links and return every email address found under its domain."""
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 69 |
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for the given number of bits, as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Build the Gray code sequence as binary strings, recursively."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
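

# Example (illustrative): successive 2-bit Gray codes differ in exactly one bit.
#
#   gray_code(2)  # -> [0, 1, 3, 2], i.e. 00, 01, 11, 10
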
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 | 1 |
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 707 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a ksize x ksize Gabor kernel for the given orientation and wavelength."""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
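

# Illustrative parameter note: theta rotates the filter (in degrees), lambd sets
# the wavelength of the sinusoidal factor, sigma the width of the Gaussian
# envelope, gamma its aspect ratio, and psi the phase offset. For instance:
#
#   kernel = gabor_filter_kernel(ksize=9, sigma=4, theta=90, lambd=10, gamma=0.5, psi=0)
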
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_UpperCamelCase : int = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
_UpperCamelCase : str = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_UpperCamelCase : List[Any] = np.zeros(gray.shape[:2])
for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]:
_UpperCamelCase : Optional[Any] = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_UpperCamelCase : Any = out / out.max() * 2_5_5
_UpperCamelCase : Optional[Any] = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 341 | 0 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
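

# Example (illustrative): a 3-layer student of a 12-layer teacher copies the
# first, middle and last teacher layers, spanning the whole depth.
#
#   assert pick_layers_to_copy(n_student=3, n_teacher=12) == [0, 6, 11]
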
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
| 598 |
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 525 | 0 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Any , *__lowerCamelCase : str , **__lowerCamelCase : int ):
"""simple docstring"""
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
_snake_case = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
_snake_case = self.values[key]
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return (
sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=None ):
"""simple docstring"""
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
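

# Illustrative usage (assumes the sibling HashTable constructor takes the table
# size first, as in TheAlgorithms' hash_table module -- an assumption, not a
# documented API):
#
#   table = HashTableWithLinkedList(3)
#   table.insert_data(17)  # colliding inserts chain inside a deque at one slot
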
| 404 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
snake_case = None
snake_case = logging.get_logger(__name__)
snake_case = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
snake_case = {
'''google/bigbird-roberta-base''': 4_0_9_6,
'''google/bigbird-roberta-large''': 4_0_9_6,
'''google/bigbird-base-trivia-itc''': 4_0_9_6,
}
snake_case = '''▁'''
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : Dict = VOCAB_FILES_NAMES
A__ : Any = PRETRAINED_VOCAB_FILES_MAP
A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : List[Any] = BigBirdTokenizer
A__ : Dict = ['''input_ids''', '''attention_mask''']
A__ : List[int] = []
def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : int="<unk>" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : Tuple="</s>" , __lowerCamelCase : Any="<pad>" , __lowerCamelCase : List[str]="[SEP]" , __lowerCamelCase : str="[MASK]" , __lowerCamelCase : str="[CLS]" , **__lowerCamelCase : Any , ):
"""simple docstring"""
_snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
_snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
_snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
_snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
_snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
_snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_snake_case = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
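

# Illustrative usage (assumes network access to the Hub or a local checkpoint):
#
#   tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [CLS] + [5, 6] + [SEP] ids
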
| 404 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 67 |
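# A usage sketch for the processor exercised by the tests above. The checkpoint
# name is an assumption (any ChineseCLIP checkpoint with a processor config
# would work) and fetching it requires network access:
from PIL import Image
import numpy as np
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # dummy image
inputs = processor(text="T-shirt的价格是15便士。", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']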
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Check whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v)."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 67 | 1 |
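# Sanity check for the Rayleigh quotient above: for a Hermitian matrix, the
# quotient of any nonzero vector lies between the extreme eigenvalues (the
# matrix and vector here are illustrative):
import numpy as np

a = np.array([[2.0, 1.0], [1.0, 3.0]])  # symmetric, hence Hermitian
v = np.array([1.0, 1.0])
rq = float(v @ a @ v) / float(v @ v)
eigs = np.linalg.eigvalsh(a)  # eigenvalues in ascending order
assert eigs[0] <= rq <= eigs[-1]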
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__UpperCAmelCase = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 692 |
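# Illustrative use of the version helpers above: `require_version` raises an
# error, with the hint appended, when the installed package misses the
# specifier (package and versions here are examples):
from transformers.utils.versions import require_version

require_version("tqdm>=4.27", "pip install tqdm --upgrade")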
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of primes
    among the 2*j - 1 diagonal numbers first falls below `ratio`."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 692 | 1 |
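# Cross-check of the 6k±1 primality test above against naive trial division
# (self-contained copy of the test so the check runs standalone):
import math


def is_prime_6k(n: int) -> bool:
    if 1 < n < 4:
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(n) + 1), 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True


assert all(is_prime_6k(n) == (n > 1 and all(n % d for d in range(2, n))) for n in range(300))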
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "student_t" , __UpperCamelCase = "nll" , __UpperCamelCase = 1 , __UpperCamelCase = [1, 2, 3, 4, 5, 6, 7] , __UpperCamelCase = "mean" , __UpperCamelCase = 0 , __UpperCamelCase = 0 , __UpperCamelCase = 0 , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 3_2 , __UpperCamelCase = 3_2 , __UpperCamelCase = 2 , __UpperCamelCase = 2 , __UpperCamelCase = 2 , __UpperCamelCase = 2 , __UpperCamelCase = True , __UpperCamelCase = "gelu" , __UpperCamelCase = 6_4 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 1_0_0 , __UpperCamelCase = 0.0_2 , __UpperCamelCase=True , **__UpperCamelCase , )-> List[Any]:
# time series specific configuration
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length or prediction_length
__lowerCAmelCase = distribution_output
__lowerCAmelCase = loss
__lowerCAmelCase = input_size
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = scaling
__lowerCAmelCase = num_dynamic_real_features
__lowerCAmelCase = num_static_real_features
__lowerCAmelCase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
__lowerCAmelCase = cardinality
else:
__lowerCAmelCase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
__lowerCAmelCase = embedding_dimension
else:
__lowerCAmelCase = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCAmelCase = num_parallel_samples
# Transformer architecture configuration
__lowerCAmelCase = input_size * len(__UpperCamelCase ) + self._number_of_features
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = use_cache
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def __UpperCAmelCase ( self )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 367 |
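# Minimal construction sketch for the config above (argument values are
# illustrative; requires the transformers package):
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=2)
# context_length falls back to prediction_length, and feature_size combines
# the lagged inputs with the static/time features:
assert config.context_length == 24
print(config.feature_size, config.d_model)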
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Return the best answer spans across passages, ordered by relevance and span score."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Score every (start, end) window and keep the top non-nested spans."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 367 | 1 |
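# A toy, self-contained illustration of the span selection in _get_best_spans
# above: score every (start, end) window, sort by score, and skip spans that
# nest inside (or contain) an already chosen span. All numbers are made up.
start_logits = [0.1, 2.0, 0.3]
end_logits = [0.2, 1.5, 0.9]
max_answer_length = 2
scores = sorted(
    (
        ((s, s + length), start_score + end_score)
        for s, start_score in enumerate(start_logits)
        for length, end_score in enumerate(end_logits[s : s + max_answer_length])
    ),
    key=lambda x: x[1],
    reverse=True,
)
chosen = []
for (s, e), _ in scores:
    if any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen):
        continue
    chosen.append((s, e))
print(chosen)  # [(1, 1), (2, 2), (0, 0)]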
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
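# A quick sketch of build_inputs_with_special_tokens above with stand-in IDs
# (bos/eos = 1/2 are illustrative, not the real vocabulary):
bos, eos = 1, 2
ids_0, ids_1 = [7, 8], [9]
single = [bos] + ids_0 + [eos]
pair = single + [eos] + ids_1 + [eos]
assert single == [1, 7, 8, 2] and pair == [1, 7, 8, 2, 2, 9, 2]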
| 711 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 275 | 0 |
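# Example invocation of the conversion script above (the script filename is
# hypothetical; --txt2img_unclip defaults to "kakaobrain/karlo-v1-alpha"):
#
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations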
INSTALL_CONTENT = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 43 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_inputs, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 43 | 1 |
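# How the `noise` argument in the tests above drives ViTMAE's random masking,
# sketched in plain numpy (sizes and mask_ratio are illustrative): patches are
# ranked by their noise value, and the highest-noise fraction is masked.
import numpy as np

np.random.seed(2)
num_patches, mask_ratio = 16, 0.75
noise = np.random.uniform(size=(1, num_patches))
ids_shuffle = np.argsort(noise, axis=1)  # ascending noise = kept first
len_keep = int(num_patches * (1 - mask_ratio))
mask = np.ones((1, num_patches))
mask[0, ids_shuffle[0, :len_keep]] = 0  # 0 = keep, 1 = masked
assert mask.sum() == num_patches - len_keep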
from sklearn.metrics import recall_score
import datasets
snake_case__ : Optional[int] = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
snake_case__ : int = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
 - **zero_division** (`"warn"`, `0`, or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
snake_case__ : int = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 721 |
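# Hand computation matching the metric above: recall = TP / (TP + FN), using
# the same toy labels as the first docstring example:
refs = [0, 0, 1, 1, 1]
preds = [0, 1, 0, 1, 1]
tp = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 1)
fn = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 0)
assert tp / (tp + fn) == 2 / 3  # the 0.6666... the docstring example reports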
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads( self ):
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        '''simple docstring'''
        return self.d_model

    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
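# Illustrative sketch (an addition; assumes the usual transformers imports at
# the top of this module): `attribute_map` lets generic code read
# `hidden_size` and `num_attention_heads` while the config internally stores
# `d_model` and `encoder_attention_heads`.
def _demo_attribute_map_usage():
    config = ConditionalDetrConfig()
    assert config.hidden_size == config.d_model == 256
    assert config.num_attention_heads == config.encoder_attention_heads == 8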
class ConditionalDetrOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )

    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1e-5

    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 12
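# Illustrative sketch (an addition; constructing the export config from a
# default model config is an assumption here): the ONNX export declares two
# inputs with dynamic axes and targets opset 12.
def _demo_onnx_config_inputs():
    onnx_config = ConditionalDetrOnnxConfig(ConditionalDetrConfig() )
    assert list(onnx_config.inputs.keys() ) == ["pixel_values", "pixel_mask"]
    assert onnx_config.default_onnx_opset == 12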
| 655 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name ):
    """simple docstring"""
    if "cls_token" in name:
        name = name.replace('cls_token' , 'vit.embeddings.cls_token' )
    if "mask_token" in name:
        name = name.replace('mask_token' , 'decoder.mask_token' )
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed' , 'vit.embeddings.position_embeddings' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'vit.embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'vit.embeddings.norm' )
    if "decoder_blocks" in name:
        name = name.replace('decoder_blocks' , 'decoder.decoder_layers' )
    if "blocks" in name:
        name = name.replace('blocks' , 'vit.encoder.layer' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "decoder_embed" in name:
        name = name.replace('decoder_embed' , 'decoder.decoder_embed' )
    if "decoder_norm" in name:
        name = name.replace('decoder_norm' , 'decoder.decoder_norm' )
    if "decoder_pred" in name:
        name = name.replace('decoder_pred' , 'decoder.decoder_pred' )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('norm.weight' , 'vit.layernorm.weight' )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('norm.bias' , 'vit.layernorm.bias' )
    return name
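# Illustrative check (an addition for clarity; no external assumptions):
# a raw MAE checkpoint key maps onto the transformers naming scheme above.
def _demo_rename_key():
    assert rename_key("cls_token" ) == "vit.embeddings.cls_token"
    assert rename_key("blocks.0.attn.proj.weight" ) == "vit.encoder.layer.0.attention.output.dense.weight"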
def convert_state_dict(orig_state_dict , config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = 'decoder.decoder_layers.'
            else:
                dim = config.hidden_size
                prefix = 'vit.encoder.layer.'
            # split the fused qkv projection into separate query, key and value tensors
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
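# Illustrative sketch (an addition for clarity; only uses torch, which is
# imported above): a fused qkv projection of shape (3 * dim, dim) is sliced
# into query, key and value blocks exactly as `convert_state_dict` does.
def _demo_split_fused_qkv():
    dim = 4
    fused = torch.arange(3 * dim * dim , dtype=torch.float32 ).reshape(3 * dim , dim )
    query , key , value = fused[:dim, :] , fused[dim : dim * 2, :] , fused[-dim:, :]
    # the three blocks concatenated along dim 0 recover the fused matrix
    assert torch.equal(torch.cat([query, key, value] , dim=0 ) , fused )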
def convert_vit_mae_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['model']
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    url = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    inputs = image_processor(images=image , return_tensors='pt' )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1e-4 )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
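# Example invocation, illustrative only (the script filename and output path
# are placeholders, not taken from the original file):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base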
| 220 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin ):
    '''simple docstring'''

    feature_extractor_class = 'Speech2TextFeatureExtractor'
    tokenizer_class = 'Speech2TextTokenizer'

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )

        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.)' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
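# Minimal usage sketch (an addition; the checkpoint name is an assumption,
# any Speech2Text checkpoint whose feature extractor and tokenizer match this
# processor would behave the same way).
def _demo_processor_call():
    import numpy as np

    processor = Speech2TextProcessor.from_pretrained('facebook/s2t-small-librispeech-asr' )
    audio = np.zeros(16000 , dtype=np.float32 )  # one second of silence at 16 kHz
    inputs = processor(audio=audio , sampling_rate=16000 , text='hello world' )
    # passing both `audio` and `text` returns input features plus `labels`
    assert 'input_features' in inputs and 'labels' in inputs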
| 220 | 1 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str ) -> bool:
    """simple docstring"""
    pattern = re.compile(
        R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$" )
    return bool(re.search(pattern , phone ) )


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
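    # A few illustrative checks (an addition; the sample numbers are invented
    # to show matching and non-matching formats):
    assert is_sri_lankan_phone_number("+94773283048")
    assert is_sri_lankan_phone_number("0718382399")
    assert not is_sri_lankan_phone_number("0912343221")  # second block must start with 7
    assert not is_sri_lankan_phone_number("075328")  # too few digits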
| 595 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def ta_base_tokenizer( self ):
        return ByTaTokenizer.from_pretrained("google/byt5-small" )

    def get_tokenizer( self , **kwargs ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : List[Any]=20 , UpperCamelCase__ : Tuple=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
A__ : int =[]
for i in range(len(UpperCamelCase__ ) ):
try:
A__ : str =tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
A__ : Any =list(filter(lambda UpperCamelCase__ : re.match(R"^[ a-zA-Z]+$" , t[1] ) , UpperCamelCase__ ) )
A__ : str =list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
A__ : int =toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
A__ : List[Any] =toks + toks
# toks_str = [t[1] for t in toks]
A__ : str =[t[0] for t in toks]
# Ensure consistency
A__ : Optional[int] =tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
A__ : List[Any] =(
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
A__ : List[Any] =" " + output_txt
A__ : Tuple =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def _UpperCAmelCase ( self : Union[str, Any] ):
A__ : Any =self.ta_base_tokenizer
A__ : str =tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
A__ : Any =tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"] )
def _UpperCAmelCase ( self : List[Any] ):
A__ : Optional[int] =self.ta_base_tokenizer
A__ : Dict ="Unicode €."
A__ : List[Any] =tokenizer(UpperCamelCase__ )
A__ : List[str] =[88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"] , UpperCamelCase__ )
# decoding
A__ : Tuple =tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , "Unicode €.</s>" )
A__ : Tuple =tokenizer("e è é ê ë" )
A__ : Tuple =[104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"] , UpperCamelCase__ )
# decoding
A__ : Union[str, Any] =tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "e è é ê ë</s>" )
def _UpperCAmelCase ( self : Dict ):
A__ : Dict =self.ta_base_tokenizer
A__ : Any =["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
A__ : Any =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
A__ : Union[str, Any] =tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
A__ : str =list(batch.input_ids.numpy()[0] )
else:
A__ : int =list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCAmelCase ( self : Optional[Any] ):
A__ : Any =self.ta_base_tokenizer
A__ : List[Any] =["A long paragraph for summarization.", "Another paragraph for summarization."]
A__ : Tuple =tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , UpperCamelCase__ )
self.assertIn("attention_mask" , UpperCamelCase__ )
self.assertNotIn("decoder_input_ids" , UpperCamelCase__ )
self.assertNotIn("decoder_attention_mask" , UpperCamelCase__ )
def _UpperCAmelCase ( self : Optional[Any] ):
A__ : Union[str, Any] =self.ta_base_tokenizer
A__ : Union[str, Any] =[
"Summary of the text.",
"Another summary.",
]
A__ : Dict =tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding="max_length" , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _UpperCAmelCase ( self : List[str] ):
A__ : Optional[int] =self.ta_base_tokenizer
A__ : int =["A long paragraph for summarization. </s>"]
A__ : str =["Summary of the text. </s>"]
# fmt: off
A__ : int =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
A__ : str =[86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
A__ : int =tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch["input_ids"][0] )
self.assertEqual(UpperCamelCase__ , batch["labels"][0] )
def _UpperCAmelCase ( self : List[Any] ):
# safety check on max_len default value so we are sure the test works
A__ : List[str] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
A__ : List[Any] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
A__ : Tuple =tempfile.mkdtemp()
A__ : List[str] =" He is very happy, UNwant\u00E9d,running"
A__ : str =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
A__ : Optional[Any] =tokenizer.__class__.from_pretrained(UpperCamelCase__ )
A__ : str =after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
A__ : int =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
A__ : str =tempfile.mkdtemp()
A__ : Optional[Any] =" He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
A__ : Dict =tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
A__ : Tuple =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
A__ : List[str] =tokenizer.__class__.from_pretrained(UpperCamelCase__ )
A__ : List[str] =after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
A__ : List[str] =tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def _UpperCAmelCase ( self : Dict ):
A__ : List[str] =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
A__ : List[Any] =json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
A__ : Tuple =json.load(UpperCamelCase__ )
A__ : int =[F'''<extra_id_{i}>''' for i in range(125 )]
A__ : int =added_tokens_extra_ids + [
"an_additional_special_token"
]
A__ : Optional[Any] =added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(UpperCamelCase__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
A__ : Tuple =tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
A__ : Optional[int] =added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=UpperCamelCase__ )]
A__ : Union[str, Any] =tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def _UpperCAmelCase ( self : List[Any] ):
A__ : str =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
A__ : Optional[int] =tokenizer_class.from_pretrained(UpperCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == "" )
def _UpperCAmelCase ( self : List[Any] ):
pass
def _UpperCAmelCase ( self : Optional[Any] ):
pass
def _UpperCAmelCase ( self : Optional[int] ):
pass
def _UpperCAmelCase ( self : Optional[int] ):
pass
def _UpperCAmelCase ( self : Any ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
A__ : Union[str, Any] =self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
A__ : Tuple =["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
A__ : Optional[Any] =tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def _UpperCAmelCase ( self : List[str] ):
A__ : Union[str, Any] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
A__ : List[str] =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
A__ : List[Any] =0
A__ : List[Any] =tokenizer.convert_ids_to_tokens(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
for attr in attributes_list:
setattr(UpperCamelCase__ , attr + "_id" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + "_id" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , attr + "_id" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + "_id" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens_ids" ) , [] )
setattr(UpperCamelCase__ , "additional_special_tokens_ids" , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens" ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
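# Illustrative sketch (an addition for clarity; no project-specific
# assumptions): the expected id lists in the integration tests above are just
# UTF-8 bytes shifted past the special tokens (pad=0, eos=1, unk=2),
# i.e. id = byte + 3.
def _demo_byte_level_ids():
    text = "Unicode €."
    ids = [byte + 3 for byte in text.encode("utf-8" )]
    assert ids[:3] == [88, 113, 108]
    assert ids[-4:] == [229, 133, 175, 49]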
| 595 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
UpperCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase = {'''unk_token''': '''<unk>'''}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(lowerCamelCase_) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(lowerCamelCase_))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''')

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''')
@require_torch
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCamelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(lowerCamelCase_ , max_length=len(lowerCamelCase_) , padding=lowerCamelCase_ , return_tensors='''pt''')
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
@require_torch
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''pt''')
self.assertIn('''input_ids''' , lowerCamelCase_)
self.assertIn('''attention_mask''' , lowerCamelCase_)
self.assertNotIn('''labels''' , lowerCamelCase_)
self.assertNotIn('''decoder_attention_mask''' , lowerCamelCase_)
@require_torch
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(text_target=lowerCamelCase_ , max_length=3_2 , padding='''max_length''' , return_tensors='''pt''')
self.assertEqual(3_2 , targets['''input_ids'''].shape[1])
@require_torch
def UpperCAmelCase__ ( self) -> Union[str, Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(
['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''pt''')
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2))
@require_torch
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = ['''A long paragraph for summarization.''']
UpperCamelCase = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(lowerCamelCase_ , return_tensors='''pt''')
UpperCamelCase = tokenizer(text_target=lowerCamelCase_ , return_tensors='''pt''')
UpperCamelCase = inputs['''input_ids''']
UpperCamelCase = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
def UpperCAmelCase__ ( self) -> Dict:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = ['''Summary of the text.''', '''Another summary.''']
UpperCamelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_)
UpperCamelCase = [[0] * len(lowerCamelCase_) for x in encoded_output['''input_ids''']]
UpperCamelCase = tokenizer.pad(lowerCamelCase_)
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Tuple:
pass
def UpperCAmelCase__ ( self) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = '''A, <mask> AllenNLP sentence.'''
UpperCamelCase = tokenizer_r.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_)
UpperCamelCase = tokenizer_p.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_)
self.assertEqual(sum(tokens_r['''token_type_ids''']) , sum(tokens_p['''token_type_ids''']))
self.assertEqual(
sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) , sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) , )
UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(
lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
self.assertSequenceEqual(
            lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
| 34 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n           Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n           Kern, Robert and Larson, Eric and Carey, C J and\n           Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n           Harris, Charles R. and Archibald, Anne M. and\n           Ribeiro, Antonio H. and Pedregosa, Fabian and\n           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n           Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric ):
    """simple docstring"""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute(self , predictions , references , return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references , predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions)[0])}
| 34 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v(state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x


def reverse_correct_unfold_reduction_order(x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x


def correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x


def reverse_correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
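# Illustrative sketch (an addition for clarity; only uses torch, imported
# above): each "reverse" helper undoes the corresponding "correct"
# reordering, so a round trip leaves a patch-merging weight unchanged.
def _demo_unfold_order_roundtrip():
    x = torch.arange(16.0 ).reshape(2 , 8 )
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x ) ) , x )
    y = torch.arange(8.0 )
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(y ) ) , y )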
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
        """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
        """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
        """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
        """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , file_name=model_name )[
        """state_dict"""
    ]
    for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" , """batch_norm""" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
    # verify on image
    url = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print(logits.shape )
    print("""First values of logits:""" , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(f'''openmmlab/{model_name}''' )
        processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F'upernet-swin-{size}' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
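# Example invocation, illustrative only (the script filename and output path
# are placeholders, not taken from the original file):
#   python convert_swin_upernet_to_pytorch.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny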
| 705 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp( self ):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = """こんにちは、世界。 \nこんばんは、世界。"""
        output_text = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
        return input_text, output_text

    def get_clean_sequence( self , tokenizer ):
        """simple docstring"""
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass # TODO add if relevant
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
        self.assertListEqual(tokens , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(lowerCAmelCase )
__lowerCAmelCase : List[str] = """こんにちは、世界。\nこんばんは、世界。"""
__lowerCAmelCase : int = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(lowerCAmelCase , """wb""" ) as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase )
with open(lowerCAmelCase , """rb""" ) as handle:
__lowerCAmelCase : List[str] = pickle.load(lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = tokenizer_new.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
"""simple docstring"""
__lowerCAmelCase : Dict = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
"""simple docstring"""
try:
__lowerCAmelCase : Optional[Any] = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
"""simple docstring"""
try:
__lowerCAmelCase : Dict = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = MecabTokenizer(do_lower_case=lowerCAmelCase , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
"""simple docstring"""
try:
__lowerCAmelCase : str = MecabTokenizer(
do_lower_case=lowerCAmelCase , normalize_text=lowerCAmelCase , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = MecabTokenizer(normalize_text=lowerCAmelCase , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(lowerCAmelCase )
__lowerCAmelCase : Tuple = """こんにちは、世界。\nこんばんは、世界。"""
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(lowerCAmelCase , """wb""" ) as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase )
with open(lowerCAmelCase , """rb""" ) as handle:
__lowerCAmelCase : Tuple = pickle.load(lowerCAmelCase )
__lowerCAmelCase : Tuple = tokenizer_new.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        # do_lower_case=True: "iphone" is lowercased in the expected output
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        # normalize_text=False: the ideographic space "\u3000" is preserved
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        # trim_whitespace=True: all whitespace tokens are dropped
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        # without NFKC normalization, half-width katakana stays decomposed
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 218 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into a Hugging Face checkpoint."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
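# Example invocation (illustration only; the file names follow the argparse help
# strings above and are hypothetical):
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json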
| 321 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
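# Usage sketch (illustration only; not part of the original module). Assumes
# `pytesseract` is installed so the LayoutLMv2 image processor can run OCR, and
# "document.png" is a hypothetical input file.
if __name__ == "__main__":
    from PIL import Image

    from transformers import LayoutXLMProcessor

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.open("document.png").convert("RGB")
    encoding = processor(image, return_tensors="pt")
    print(encoding.keys())  # typically: input_ids, attention_mask, bbox, image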
| 321 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 8 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 byte values to printable unicode strings, used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
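# Usage sketch (illustration only; not part of the original module). Demonstrates the
# `global_attention_mask` padding behaviour implemented in `_pad` above.
if __name__ == "__main__":
    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    batch = tokenizer(["short text", "a somewhat longer piece of text"])
    # LED convention: global attention on the first token of each sequence
    batch["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in batch["input_ids"]]
    padded = tokenizer.pad(batch, padding="longest")
    # shorter sequences are padded with -1 ("local attention") on the right
    print(padded["global_attention_mask"])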
| 8 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
SCREAMING_SNAKE_CASE__ : int =HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int =rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
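# Example invocation (illustration only; the paths below are hypothetical):
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/my_knowledge_dataset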
| 434 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    # pairwise squared distances via ||a||^2 - 2ab + ||b||^2
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d


def color_quantize(x, clusters):
    # map each RGB pixel to the index of its nearest color cluster
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["input_ids"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        # rescale pixel values to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
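# Usage sketch (illustration only; not part of the original module). Color
# quantization maps each RGB pixel to its nearest cluster id, so the model
# consumes integer "color tokens" rather than raw pixels. The toy palette below
# stands in for the clusters normally shipped with a checkpoint.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    image = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)
    clusters = rng.uniform(-1, 1, size=(512, 3))  # hypothetical palette in [-1, 1]
    processor = ImageGPTImageProcessor(clusters=clusters, size={"height": 32, "width": 32})
    encoding = processor(image)
    print(len(encoding["input_ids"][0]))  # 32 * 32 = 1024 color tokens per image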
| 109 | 0 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the shortest distance in metres between two points on the surface of
    an ellipsoidal earth, given latitudes and longitudes in degrees, using
    Lambert's formula for long lines.
    """
    # Equation parameters
    # https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
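# Worked example (illustration only): San Francisco to New York comes out at
# roughly 4.1e6 metres along the WGS84 ellipsoid.
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)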
if __name__ == "__main__":
import doctest
doctest.testmod()
| 393 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
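# Usage sketch (illustration only; not part of the original module). Building a model
# from a fresh config uses the timm ResNet-50 backbone with the defaults above, so
# the `timm` dependency (and a backbone weight download) is required.
if __name__ == "__main__":
    from transformers import TableTransformerModel

    config = TableTransformerConfig(num_queries=50)
    model = TableTransformerModel(config)  # randomly initialised apart from the backbone
    print(config.hidden_size)  # 256 — aliased to `d_model` through `attribute_map`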
| 393 | 1 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version against a requirement using a given operation (e.g. ">", "<=", "==")."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
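# Usage sketch (illustration only; not part of the original module) for
# `compare_versions` above and the `is_torch_version` wrapper below:
#   compare_versions("numpy", "<", "2.0")  -> True when the installed numpy predates 2.0
#   is_torch_version(">=", "1.12.0")       -> True when the installed torch is 1.12 or newer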
def is_torch_version(operation: str, version: str):
    """Compare the currently installed PyTorch version to `version`, using `operation`."""
    return compare_versions(torch_version, operation, version)
| 46 |
import unittest
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    # Field names below are reconstructed from `SageMakerConfig`; the boolean is
    # assumed to be the `fp16` flag.
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class __a ( unittest.TestCase ):
def A ( self : int ):
# Parse the mock training-script arguments and check that value types are inferred correctly.
lowerCAmelCase_ : Dict = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["""model_name_or_path"""] , UpperCAmelCase )
assert isinstance(converted_args["""do_train"""] , UpperCAmelCase )
assert isinstance(converted_args["""epochs"""] , UpperCAmelCase )
assert isinstance(converted_args["""learning_rate"""] , UpperCAmelCase )
assert isinstance(converted_args["""max_steps"""] , UpperCAmelCase )
with pytest.raises(UpperCAmelCase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
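# An assumed, minimal reimplementation of what `_convert_nargs_to_dict` is being
# tested for above: pair up "--flag value" tokens, infer bool/int/float/str, and
# treat a flag followed by another flag as a bare True.
def convert_nargs_to_dict(args):
    out, i = {}, 0
    while i < len(args):
        key = args[i].lstrip("-")
        value = True  # bare flag, e.g. "--do_predict"
        if i + 1 < len(args) and not args[i + 1].startswith("--"):
            raw = args[i + 1]
            i += 1
            if raw in ("True", "False"):
                value = raw == "True"
            else:
                try:
                    value = int(raw)
                except ValueError:
                    try:
                        value = float(raw)
                    except ValueError:
                        value = raw
        out[key] = value
        i += 1
    return out

print(convert_nargs_to_dict(["--epochs", "3", "--learning_rate", "5e-5", "--do_predict"]))
# {'epochs': 3, 'learning_rate': 5e-05, 'do_predict': True}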
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ : Optional[int] = """▁"""
lowerCamelCase_ : List[str] = {"""vocab_file""": """spiece.model"""}
lowerCamelCase_ : Any = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
lowerCamelCase_ : List[Any] = {
"""google/pegasus-xsum""": 512,
}
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
class a__ ( __snake_case ):
A__ : Tuple = VOCAB_FILES_NAMES
A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Tuple = ['input_ids', 'attention_mask']
def __init__( self , UpperCAmelCase , UpperCAmelCase="<pad>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<mask_2>" , UpperCAmelCase="<mask_1>" , UpperCAmelCase=None , UpperCAmelCase=1_0_3 , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
__a = offset
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(UpperCAmelCase )}, but is'''
f''' {type(UpperCAmelCase )}''' )
__a = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(UpperCAmelCase ) , self.offset - 1 )
]
if len(set(UpperCAmelCase ) ) != len(UpperCAmelCase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
__a = additional_special_tokens_extended
else:
__a = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , mask_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token_sent=UpperCAmelCase , offset=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
__a = mask_token_sent
__a = vocab_file
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase )
# add special tokens to encoder dict
__a = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__a = {v: k for k, v in self.encoder.items()}
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return len(self.sp_model ) + self.offset
def __SCREAMING_SNAKE_CASE ( self ) -> Dict[str, int]:
__a = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self , UpperCAmelCase ) -> Optional[int]:
__a = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__a = self.sp_model.piece_to_id(UpperCAmelCase )
return sp_id + self.offset
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__a = self.sp_model.IdToPiece(index - self.offset )
return token
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> List[Any]:
__a = []
__a = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCAmelCase ) + token
__a = []
else:
current_sub_tokens.append(UpperCAmelCase )
out_string += self.sp_model.decode(UpperCAmelCase )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase=False ) -> int:
return 1
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Any:
__a = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , 'wb' ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,)
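# The tokenizer above reserves ids 0..offset-1 for special tokens and shifts
# every SentencePiece id up by `offset`. A self-contained illustration of that
# bookkeeping (values are illustrative, matching the defaults above):
offset = 103
encoder = {0: "<pad>", 1: "</s>", 2: "<mask_2>", 3: "<mask_1>"}

def token_id_from_sp_id(sp_id):
    return sp_id + offset

def token_from_id(token_id):
    if token_id in encoder:
        return encoder[token_id]
    return f"<piece {token_id - offset}>"  # a real tokenizer calls sp_model.IdToPiece(...)

assert token_id_from_sp_id(0) == 103
print(token_from_id(2))  # <mask_2>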
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class a__ :
A__ : List[Any] = MBartConfig
A__ : Any = {}
A__ : List[str] = 'gelu'
def __init__( self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=9_9 , UpperCAmelCase=3_2 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=2_0 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=0 , ) -> List[str]:
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = eos_token_id
__a = pad_token_id
__a = bos_token_id
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
__a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__a = tf.concat([input_ids, eos_tensor] , axis=1 )
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__a = prepare_mbart_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase ) -> int:
__a = TFMBartModel(config=UpperCAmelCase ).get_decoder()
__a = inputs_dict['input_ids']
__a = input_ids[:1, :]
__a = inputs_dict['attention_mask'][:1, :]
__a = inputs_dict['head_mask']
__a = 1
# first forward pass
__a = model(UpperCAmelCase , attention_mask=UpperCAmelCase , head_mask=UpperCAmelCase , use_cache=UpperCAmelCase )
__a , __a = outputs.to_tuple()
__a = past_key_values[1]
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ):
if attention_mask is None:
__a = tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__a = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__a = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class a__ ( __snake_case , __snake_case , unittest.TestCase ):
A__ : List[Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
A__ : Any = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
A__ : List[str] = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
A__ : int = True
A__ : List[str] = False
A__ : Union[str, Any] = False
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
__a = TFMBartModelTester(self )
__a = ConfigTester(self , config_class=UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class a__ ( unittest.TestCase ):
A__ : Optional[Any] = [
' UN Chief Says There Is No Military Solution in Syria',
]
A__ : List[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
A__ : List[Any] = 'facebook/mbart-large-en-ro'
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase ) -> Dict:
__a = self.translate_src_text(**UpperCAmelCase )
self.assertListEqual(self.expected_text , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase ) -> int:
__a = self.tokenizer(self.src_text , **UpperCAmelCase , return_tensors='tf' )
__a = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__a = self.tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
return generated_words
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
self._assert_generated_batch_equal_expected()
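# Minimal sketch of the mask construction in `prepare_mbart_inputs_dict` above:
# padding positions are zeroed out, and the first decoder position is always
# attended to (requires tensorflow; values are illustrative):
import tensorflow as tf

pad_token_id = 1
decoder_input_ids = tf.constant([[2, 5, 7, 1, 1]])
decoder_attention_mask = tf.concat(
    [
        tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
        tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], pad_token_id), tf.int8),
    ],
    axis=-1,
)
print(decoder_attention_mask.numpy().tolist())  # [[1, 1, 1, 0, 0]]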
'''simple docstring'''
def lowercase__( _UpperCamelCase : str )-> list:
"""simple docstring"""
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__lowerCAmelCase ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
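# A readable, assumed-name equivalent of the list comprehension above, with a
# worked example: one variant per alphabetic position, upper-cased in place.
def all_capitalisations(txt: str) -> list:
    return [txt[:i] + txt[i].upper() + txt[i + 1 :] for i in range(len(txt)) if txt[i].isalpha()]

print(all_capitalisations("ab1c"))  # ['Ab1c', 'aB1c', 'ab1C']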
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCAmelCase : List[Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
super().__init__()
lowercase_ = torchvision.models.resnetaaa(pretrained=lowerCAmelCase_)
lowercase_ = list(model.children())[:-2]
lowercase_ = nn.Sequential(*lowerCAmelCase_)
lowercase_ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds])
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = self.pool(self.model(lowerCAmelCase_))
lowercase_ = torch.flatten(lowerCAmelCase_ , start_dim=2)
lowercase_ = out.transpose(1 , 2).contiguous()
return out # BxNx2048
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = [json.loads(lowerCAmelCase_) for l in open(lowerCAmelCase_)]
lowercase_ = os.path.dirname(lowerCAmelCase_)
lowercase_ = tokenizer
lowercase_ = labels
lowercase_ = len(lowerCAmelCase_)
lowercase_ = max_seq_length
lowercase_ = transforms
def __len__( self : List[Any]):
"""simple docstring"""
return len(self.data)
def __getitem__( self : Tuple , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=lowerCAmelCase_))
lowercase_ , lowercase_ , lowercase_ = sentence[0], sentence[1:-1], sentence[-1]
lowercase_ = sentence[: self.max_seq_length]
lowercase_ = torch.zeros(self.n_classes)
lowercase_ = 1
lowercase_ = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""])).convert("""RGB""")
lowercase_ = self.transforms(lowerCAmelCase_)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = Counter()
for row in self.data:
label_freqs.update(row["""label"""])
return label_freqs
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = [len(row["""sentence"""] ) for row in batch]
lowercase_ , lowercase_ = len(__lowerCAmelCase ), max(__lowerCAmelCase )
lowercase_ = torch.zeros(__lowerCAmelCase , __lowerCAmelCase , dtype=torch.long )
lowercase_ = torch.zeros(__lowerCAmelCase , __lowerCAmelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(__lowerCAmelCase , __lowerCAmelCase ) ):
lowercase_ = input_row["""sentence"""]
lowercase_ = 1
lowercase_ = torch.stack([row["""image"""] for row in batch] )
lowercase_ = torch.stack([row["""label"""] for row in batch] )
lowercase_ = torch.stack([row["""image_start_token"""] for row in batch] )
lowercase_ = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _SCREAMING_SNAKE_CASE () -> Dict:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _SCREAMING_SNAKE_CASE () -> List[Any]:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
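# Self-contained sketch of the padding logic in the collate function above;
# the two bare assignments inside its loop were presumably slice assignments
# of this form before the variable names were flattened (requires torch):
import torch

sentences = [torch.tensor([101, 7, 8, 102]), torch.tensor([101, 9, 102])]
lengths = [len(s) for s in sentences]
text_tensor = torch.zeros(len(sentences), max(lengths), dtype=torch.long)
mask_tensor = torch.zeros_like(text_tensor)
for i, (sentence, length) in enumerate(zip(sentences, lengths)):
    text_tensor[i, :length] = sentence
    mask_tensor[i, :length] = 1
print(mask_tensor.tolist())  # [[1, 1, 1, 1], [1, 1, 1, 0]]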
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_a: Union[str, Any] = pd.read_csv(
"""https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
"""position_salaries.csv"""
)
_a: Tuple = dataset.iloc[:, 1:2].values
_a: Optional[Any] = dataset.iloc[:, 2].values
_a: List[Any] = train_test_split(X, y, test_size=0.2, random_state=0)
_a: Any = PolynomialFeatures(degree=4)
_a: Union[str, Any] = poly_reg.fit_transform(X)
_a: Optional[int] = LinearRegression()
pol_reg.fit(X_poly, y)
def __lowerCAmelCase ( ):
plt.scatter(_lowerCamelCase , _lowerCamelCase , color="red" )
plt.plot(_lowerCamelCase , pol_reg.predict(poly_reg.fit_transform(_lowerCamelCase ) ) , color="blue" )
plt.title("Truth or Bluff (Linear Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
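# What degree-4 PolynomialFeatures actually does to a single feature column:
# it prepends the bias term and appends x**2, x**3, x**4.
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

x = np.array([[2.0], [3.0]])
print(PolynomialFeatures(degree=4).fit_transform(x))
# [[ 1.  2.  4.  8. 16.]
#  [ 1.  3.  9. 27. 81.]]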
from sklearn.metrics import mean_squared_error
import datasets
_a: Any = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_a: List[Any] = """\
Mean Squared Error (MSE) is the average of the squared differences between the
predicted and actual values.
"""
_a: List[str] = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def __A ( self : List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def _get_feature_types( self : Optional[Any] ):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def __A ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : str=None , lowerCAmelCase : str="uniform_average" , lowerCAmelCase : Any=True ):
'''simple docstring'''
UpperCAmelCase_ = mean_squared_error(
lowerCAmelCase , lowerCAmelCase , sample_weight=lowerCAmelCase , multioutput=lowerCAmelCase , squared=lowerCAmelCase )
return {"mse": mse} | 268 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__( unittest.TestCase ):
lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def _a ( self : Any ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (3, 32, 1_28)
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
# fmt: off
__SCREAMING_SNAKE_CASE = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__SCREAMING_SNAKE_CASE = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
__SCREAMING_SNAKE_CASE = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 1_28},
}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] , **__SCREAMING_SNAKE_CASE : Dict ) -> Dict:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
__SCREAMING_SNAKE_CASE = Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) )
return image_input
def _a ( self : str ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
__SCREAMING_SNAKE_CASE = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
__SCREAMING_SNAKE_CASE = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = '''test'''
__SCREAMING_SNAKE_CASE = processor(text=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = '''test'''
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.char_decode(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _a ( self : Dict ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.randn(1 , 27 , 38 )
__SCREAMING_SNAKE_CASE = torch.randn(1 , 27 , 5_02_57 )
__SCREAMING_SNAKE_CASE = torch.randn(1 , 27 , 3_05_22 )
__SCREAMING_SNAKE_CASE = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
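# The tests above all exercise one contract: a processor wraps a tokenizer and
# an image processor, routes text= and images= to the right component, and
# raises when given neither. Minimal assumed sketch of that contract:
class TinyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You need to specify either text or images.")
        out = {}
        if images is not None:
            out.update(self.image_processor(images))
        if text is not None:
            out["labels"] = self.tokenizer(text)
        return out

proc = TinyProcessor(lambda t: [1, 2, 3], lambda im: {"pixel_values": im})
print(proc(text="test", images=[[0.0]]))  # {'pixel_values': [[0.0]], 'labels': [1, 2, 3]}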
"""simple docstring"""
def _a ( UpperCAmelCase__ = 10 ) -> str:
if not isinstance(UpperCAmelCase__ , int ) or UpperCAmelCase__ < 0:
    raise ValueError('''Invalid input''' )
modulus = 10**UpperCAmelCase__
number = 2_84_33 * (pow(2 , 7_83_04_57 , modulus )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
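# Why the three-argument pow above matters: modular exponentiation keeps every
# intermediate value below the modulus, so the multi-million-digit power is
# never needed just to read off its last ten digits.
modulus = 10 ** 10
assert pow(2, 7_830_457, modulus) == (2 ** 7_830_457) % modulus  # same value, tiny memory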
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCamelCase ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase__ : Dict = None , UpperCAmelCase__ : Tuple = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Any = False , UpperCAmelCase__ : Tuple = False , UpperCAmelCase__ : Any = None , **UpperCAmelCase__ : int , ) ->Optional[Any]:
UpperCAmelCase_ = path_or_paths
UpperCAmelCase_ = split if split or isinstance(A_ , A_ ) else "train"
UpperCAmelCase_ = features
UpperCAmelCase_ = cache_dir
UpperCAmelCase_ = keep_in_memory
UpperCAmelCase_ = streaming
UpperCAmelCase_ = num_proc
UpperCAmelCase_ = kwargs
@abstractmethod
def lowerCAmelCase__ ( self : Optional[int] ) ->Dict:
pass
class lowerCamelCase ( __snake_case ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase__ : Tuple = None , UpperCAmelCase__ : Optional[Any] = None , UpperCAmelCase__ : Any = False , UpperCAmelCase__ : Optional[Any] = False , UpperCAmelCase__ : Optional[int] = None , **UpperCAmelCase__ : str , ) ->Optional[int]:
UpperCAmelCase_ = features
UpperCAmelCase_ = cache_dir
UpperCAmelCase_ = keep_in_memory
UpperCAmelCase_ = streaming
UpperCAmelCase_ = num_proc
UpperCAmelCase_ = kwargs
@abstractmethod
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
pass
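# Minimal self-contained sketch of the reader pattern above: the base class
# stores configuration, and subclasses implement a single abstract `read`
# (names here are illustrative, not the library's):
from abc import ABC, abstractmethod

class BaseReader(ABC):
    def __init__(self, path, split="train"):
        self.path = path
        self.split = split or "train"

    @abstractmethod
    def read(self): ...

class JsonLinesReader(BaseReader):
    def read(self):
        import json
        with open(self.path, encoding="utf-8") as f:
            return [json.loads(line) for line in f]  # a real reader builds a Dataset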
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ : Optional[Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowercase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
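# The scaffolding above defers importing the heavy torch-backed module until an
# attribute is actually requested. A stripped-down, assumed equivalent using
# module-level __getattr__ (PEP 562), meant to live in a package's __init__.py:
import importlib

_import_structure = {"feature_extraction_encodec": ["EncodecFeatureExtractor"]}

def __getattr__(name):
    for submodule, names in _import_structure.items():
        if name in names:
            module = importlib.import_module(f".{submodule}", __package__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")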
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : int = 0
__UpperCamelCase : bool = False
__UpperCamelCase : float = 3.0
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Tuple ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case_ ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def lowerCAmelCase__ ( self : Union[str, Any] ):
# Check that custom GradScaler kwargs are passed through to the scaler.
UpperCamelCase_: List[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
UpperCamelCase_: Optional[Any] = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
UpperCamelCase_: int = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , snake_case_ )
@require_multi_gpu
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Optional[int] = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCamelCase_ : List[str] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCamelCase_ : Union[str, Any] = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCamelCase_ : List[Any] = torch.nn.Linear(1_00, 2_00)
lowerCamelCase_ : Any = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCamelCase_ : List[Any] = """"""
lowerCamelCase_ : int = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
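# Assumed sketch of the `to_kwargs` contract exercised above: only fields that
# differ from their dataclass defaults are emitted, so a handler can be passed
# straight into a framework constructor.
from dataclasses import dataclass, fields

@dataclass
class Handler:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        return {f.name: getattr(self, f.name) for f in fields(self)
                if getattr(self, f.name) != f.default}

assert Handler().to_kwargs() == {}
assert Handler(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}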
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase_ : Optional[int] = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class A ( lowercase__ ):
__snake_case = 42
__snake_case = jnp.floataa
__snake_case = True
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
super().setup()
lowerCAmelCase_ = nn.Dense(5, dtype=self.dtype )
def __call__( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = super().__call__(*UpperCAmelCase__, **UpperCAmelCase__ )
lowerCAmelCase_ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class A ( lowercase__ ):
__snake_case = FlaxBigBirdForNaturalQuestionsModule
def __UpperCamelCase ( _A , _A , _A , _A , _A , _A ):
def cross_entropy(_A , _A , _A=None ):
lowerCAmelCase_ = logits.shape[-1]
lowerCAmelCase_ = (labels[..., None] == jnp.arange(_A )[None]).astype('''f4''' )
lowerCAmelCase_ = jax.nn.log_softmax(_A , axis=-1 )
lowerCAmelCase_ = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowerCAmelCase_ = reduction(_A )
return loss
lowerCAmelCase_ = partial(_A , reduction=jnp.mean )
lowerCAmelCase_ = cross_entropy(_A , _A )
lowerCAmelCase_ = cross_entropy(_A , _A )
lowerCAmelCase_ = cross_entropy(_A , _A )
return (start_loss + end_loss + pooled_loss) / 3
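# Quick numeric check of the one-hot cross-entropy defined above, on a toy
# example (requires jax; values are illustrative):
import jax
import jax.numpy as jnp

toy_logits = jnp.array([[2.0, 0.5, -1.0]])
toy_labels = jnp.array([0])
one_hot = (toy_labels[..., None] == jnp.arange(3)[None]).astype("f4")
toy_loss = -jnp.sum(one_hot * jax.nn.log_softmax(toy_logits, axis=-1), axis=-1)
print(toy_loss)  # ~0.24 for the correct class with these logits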
@dataclass
class A :
__snake_case = 'google/bigbird-roberta-base'
__snake_case = 3000
__snake_case = 1_0500
__snake_case = 128
__snake_case = 3
__snake_case = 1
__snake_case = 5
# tx_args
__snake_case = 3E-5
__snake_case = 0.0
__snake_case = 2_0000
__snake_case = 0.0095
__snake_case = 'bigbird-roberta-natural-questions'
__snake_case = 'training-expt'
__snake_case = 'data/nq-training.jsonl'
__snake_case = 'data/nq-validation.jsonl'
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
os.makedirs(self.base_dir, exist_ok=UpperCAmelCase__ )
lowerCAmelCase_ = os.path.join(self.base_dir, self.save_dir )
lowerCAmelCase_ = self.batch_size_per_device * jax.device_count()
@dataclass
class A :
__snake_case = 42
__snake_case = 4096 # no dynamic padding on TPUs
def __call__( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.collate_fn(UpperCAmelCase__ )
lowerCAmelCase_ = jax.tree_util.tree_map(UpperCAmelCase__, UpperCAmelCase__ )
return batch
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.fetch_inputs(features['''input_ids'''] )
lowerCAmelCase_ = {
'''input_ids''': jnp.array(UpperCAmelCase__, dtype=jnp.intaa ),
'''attention_mask''': jnp.array(UpperCAmelCase__, dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''], dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''], dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''], dtype=jnp.intaa ),
}
return batch
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = [self._fetch_inputs(UpperCAmelCase__ ) for ids in input_ids]
return zip(*UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = [1 for _ in range(len(UpperCAmelCase__ ) )]
while len(UpperCAmelCase__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __UpperCamelCase ( _A , _A , _A=None ):
if seed is not None:
lowerCAmelCase_ = dataset.shuffle(seed=_A )
for i in range(len(_A ) // batch_size ):
lowerCAmelCase_ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_A )
@partial(jax.pmap , axis_name='''batch''' )
def __UpperCamelCase ( _A , _A , **_A ):
def loss_fn(_A ):
lowerCAmelCase_ = model_inputs.pop('''start_labels''' )
lowerCAmelCase_ = model_inputs.pop('''end_labels''' )
lowerCAmelCase_ = model_inputs.pop('''pooled_labels''' )
lowerCAmelCase_ = state.apply_fn(**_A , params=_A , dropout_rng=_A , train=_A )
lowerCAmelCase_ = outputs
return state.loss_fn(
_A , _A , _A , _A , _A , _A , )
lowerCAmelCase_ = jax.random.split(_A )
lowerCAmelCase_ = jax.value_and_grad(_A )
lowerCAmelCase_ = grad_fn(state.params )
lowerCAmelCase_ = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
lowerCAmelCase_ = jax.lax.pmean(_A , '''batch''' )
lowerCAmelCase_ = state.apply_gradients(grads=_A )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def __UpperCamelCase ( _A , **_A ):
lowerCAmelCase_ = model_inputs.pop('''start_labels''' )
lowerCAmelCase_ = model_inputs.pop('''end_labels''' )
lowerCAmelCase_ = model_inputs.pop('''pooled_labels''' )
lowerCAmelCase_ = state.apply_fn(**_A , params=state.params , train=_A )
lowerCAmelCase_ = outputs
lowerCAmelCase_ = state.loss_fn(_A , _A , _A , _A , _A , _A )
lowerCAmelCase_ = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class A ( train_state.TrainState ):
__snake_case = struct.field(pytree_node=lowercase__ )
@dataclass
class A :
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = None
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__=None ):
"""simple docstring"""
lowerCAmelCase_ = model.params
lowerCAmelCase_ = TrainState.create(
apply_fn=model.__call__, params=UpperCAmelCase__, tx=UpperCAmelCase__, loss_fn=UpperCAmelCase__, )
if ckpt_dir is not None:
lowerCAmelCase_ = restore_checkpoint(UpperCAmelCase__, UpperCAmelCase__ )
lowerCAmelCase_ = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
lowerCAmelCase_ = build_tx(**UpperCAmelCase__ )
lowerCAmelCase_ = train_state.TrainState(
step=UpperCAmelCase__, apply_fn=model.__call__, params=UpperCAmelCase__, tx=UpperCAmelCase__, opt_state=UpperCAmelCase__, )
lowerCAmelCase_ = args
lowerCAmelCase_ = data_collator
lowerCAmelCase_ = lr
lowerCAmelCase_ = params
lowerCAmelCase_ = jax_utils.replicate(UpperCAmelCase__ )
return state
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.args
lowerCAmelCase_ = len(UpperCAmelCase__ ) // args.batch_size
lowerCAmelCase_ = jax.random.PRNGKey(0 )
lowerCAmelCase_ = jax.random.split(UpperCAmelCase__, jax.device_count() )
for epoch in range(args.max_epochs ):
lowerCAmelCase_ = jnp.array(0, dtype=jnp.floataa )
lowerCAmelCase_ = get_batched_dataset(UpperCAmelCase__, args.batch_size, seed=UpperCAmelCase__ )
lowerCAmelCase_ = 0
for batch in tqdm(UpperCAmelCase__, total=UpperCAmelCase__, desc=f"Running EPOCH-{epoch}" ):
lowerCAmelCase_ = self.data_collator(UpperCAmelCase__ )
lowerCAmelCase_ = self.train_step_fn(UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
lowerCAmelCase_ = jax_utils.unreplicate(state.step )
lowerCAmelCase_ = running_loss.item() / i
lowerCAmelCase_ = self.scheduler_fn(state_step - 1 )
lowerCAmelCase_ = self.evaluate(UpperCAmelCase__, UpperCAmelCase__ )
lowerCAmelCase_ = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(UpperCAmelCase__ ) )
self.logger.log(UpperCAmelCase__, commit=UpperCAmelCase__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = get_batched_dataset(UpperCAmelCase__, self.args.batch_size )
lowerCAmelCase_ = len(UpperCAmelCase__ ) // self.args.batch_size
lowerCAmelCase_ = jnp.array(0, dtype=jnp.floataa )
lowerCAmelCase_ = 0
for batch in tqdm(UpperCAmelCase__, total=UpperCAmelCase__, desc='''Evaluating ... ''' ):
lowerCAmelCase_ = self.data_collator(UpperCAmelCase__ )
lowerCAmelCase_ = self.val_step_fn(UpperCAmelCase__, **UpperCAmelCase__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = jax_utils.unreplicate(UpperCAmelCase__ )
print(f"SAVING CHECKPOINT IN {save_dir}", end=''' ... ''' )
self.model_save_fn(UpperCAmelCase__, params=state.params )
with open(os.path.join(UpperCAmelCase__, '''opt_state.msgpack''' ), '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args, os.path.join(UpperCAmelCase__, '''args.joblib''' ) )
joblib.dump(self.data_collator, os.path.join(UpperCAmelCase__, '''data_collator.joblib''' ) )
with open(os.path.join(UpperCAmelCase__, '''training_state.json''' ), '''w''' ) as f:
json.dump({'''step''': state.step.item()}, UpperCAmelCase__ )
print('''DONE''' )
def __UpperCamelCase ( _A , _A ):
print(f"RESTORING CHECKPOINT FROM {save_dir}" , end=''' ... ''' )
with open(os.path.join(_A , '''flax_model.msgpack''' ) , '''rb''' ) as f:
lowerCAmelCase_ = from_bytes(state.params , f.read() )
with open(os.path.join(_A , '''opt_state.msgpack''' ) , '''rb''' ) as f:
lowerCAmelCase_ = from_bytes(state.opt_state , f.read() )
lowerCAmelCase_ = joblib.load(os.path.join(_A , '''args.joblib''' ) )
lowerCAmelCase_ = joblib.load(os.path.join(_A , '''data_collator.joblib''' ) )
with open(os.path.join(_A , '''training_state.json''' ) , '''r''' ) as f:
lowerCAmelCase_ = json.load(_A )
lowerCAmelCase_ = training_state['''step''']
print('''DONE''' )
return params, opt_state, step, args, data_collator
def __UpperCamelCase ( _A , _A , _A , _A ):
lowerCAmelCase_ = num_train_steps - warmup_steps
lowerCAmelCase_ = optax.linear_schedule(init_value=_A , end_value=_A , transition_steps=_A )
lowerCAmelCase_ = optax.linear_schedule(init_value=_A , end_value=1E-7 , transition_steps=_A )
lowerCAmelCase_ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def __UpperCamelCase ( _A , _A , _A , _A , _A ):
def weight_decay_mask(_A ):
lowerCAmelCase_ = traverse_util.flatten_dict(_A )
lowerCAmelCase_ = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
return traverse_util.unflatten_dict(_A )
lowerCAmelCase_ = scheduler_fn(_A , _A , _A , _A )
lowerCAmelCase_ = optax.adamw(learning_rate=_A , weight_decay=_A , mask=_A )
return tx, lr
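# The warmup-then-decay schedule assembled in `scheduler_fn` above, shown with
# tiny illustrative step counts (requires optax):
import optax

lr = optax.join_schedules(
    schedules=[
        optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=10),
        optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=90),
    ],
    boundaries=[10],
)
print(lr(0), lr(10), lr(100))  # 0.0 at the start, 3e-5 at the peak, ~1e-7 at the end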
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class A ( __UpperCAmelCase ):
__snake_case = 'data2vec-vision'
def __init__( self, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=224, UpperCamelCase__=16, UpperCamelCase__=3, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=True, UpperCamelCase__=[3, 5, 7, 11], UpperCamelCase__=[1, 2, 3, 6], UpperCamelCase__=True, UpperCamelCase__=0.4, UpperCamelCase__=256, UpperCamelCase__=1, UpperCamelCase__=False, UpperCamelCase__=255, **UpperCamelCase__, ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = use_mask_token
lowerCAmelCase_ = use_absolute_position_embeddings
lowerCAmelCase_ = use_relative_position_bias
lowerCAmelCase_ = use_shared_relative_position_bias
lowerCAmelCase_ = layer_scale_init_value
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCAmelCase_ = out_indices
lowerCAmelCase_ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase_ = use_auxiliary_head
lowerCAmelCase_ = auxiliary_loss_weight
lowerCAmelCase_ = auxiliary_channels
lowerCAmelCase_ = auxiliary_num_convs
lowerCAmelCase_ = auxiliary_concat_input
lowerCAmelCase_ = semantic_loss_ignore_index
class A ( __UpperCAmelCase ):
__snake_case = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 1E-4
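# The config class above is keyword plumbing: every constructor argument is
# stored as an attribute with a documented default. A hedged miniature of the
# same pattern (names are illustrative, not the library's):
class TinyVisionConfig:
    def __init__(self, hidden_size=768, num_hidden_layers=12, drop_path_rate=0.1, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.drop_path_rate = drop_path_rate
        for key, value in kwargs.items():
            setattr(self, key, value)

cfg = TinyVisionConfig(hidden_size=512, use_mean_pooling=True)
print(cfg.hidden_size, cfg.use_mean_pooling)  # 512 True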
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
A_ = datasets.load_iris()
A_ = np.array(data["data"])
A_ = np.array(data["target"])
A_ = data["target_names"]
A_ , A_ , A_ , A_ = train_test_split(X, y)
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
return np.linalg.norm(np.array(a__ ) - np.array(a__ ) )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=5 ) -> Dict:
lowerCamelCase_ = zip(a__ ,a__ )
# List of distances of all points from the point to be classified
lowerCamelCase_ = []
for data_point in data:
lowerCamelCase_ = euclidean_distance(data_point[0] ,a__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
lowerCamelCase_ = [i[1] for i in sorted(a__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
lowerCamelCase_ = Counter(a__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
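# A vectorised alternative to the distance loop in the classifier above (an
# illustrative sketch, not the author's code): one numpy call replaces the
# per-point euclidean_distance calls.
import numpy as np

def knn_predict(train_x, train_y, point, k=5):
    distances = np.linalg.norm(train_x - np.asarray(point), axis=1)
    nearest_labels = train_y[np.argsort(distances)[:k]]
    return np.bincount(nearest_labels).argmax()

X_demo = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0]])
y_demo = np.array([0, 0, 1])
print(knn_predict(X_demo, y_demo, [0.05, 0.0], k=2))  # 0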
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
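# Note: the hidden-states check above expects num_stages + 1 entries because the
# embedding output is returned alongside the per-stage feature maps.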
| 147 | 0 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
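# Quick sanity check with hypothetical values (R = 0.0821 L*atm/(mol*K), as above):
# 2 mol at 300 K in a 10 L vessel gives P = nRT/V = 2 * 0.0821 * 300 / 10 = 4.926,
# which moles_to_pressure(volume=10, moles=2, temperature=300) rounds to 5.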
if __name__ == "__main__":
import doctest
doctest.testmod() | 268 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
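# Example trace: gnome_sort([3, 1, 2]) sees 3 > 1 and swaps -> [1, 3, 2], steps back,
# then 3 > 2 and swaps -> [1, 2, 3]. Worst case is O(n^2) comparisons, like insertion sort.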
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted)) | 268 | 1 |
import math
def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
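# The loop packs octal digits into a base-10 integer: for num = 65 the remainders are
# 1, 0, 1 (65 = 0o101), accumulated as 1*1 + 0*10 + 1*100 = 101, then prefixed with "0o".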
def main() -> None:
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_16 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 570 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 570 | 1 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
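# Direct scipy equivalent of what `_compute` wraps (example data from the docstring):
#
#     from scipy.stats import spearmanr as scipy_spearmanr
#     rho, p_value = scipy_spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
#     assert round(rho, 2) == -0.7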
| 709 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
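# A hypothetical wiring of this trainer (argument names here are illustrative, not
# taken from the original training script):
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         quant_trainer_args=quant_trainer_args,
#     )
#     trainer.calibrate()           # run post-training-quantization calibration
#     metrics = trainer.evaluate()  # QA metrics (e.g. exact match / F1) after post-processing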
| 412 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
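# The slicing above splits timm's fused qkv projection, of shape (3 * hidden_size,
# hidden_size), into three (hidden_size, hidden_size) blocks: rows [0:h] become the
# query, [h:2h] the key, and [2h:3h] (equivalently [-h:]) the value.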
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 121 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
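# Comparing x * log10(a) rather than computing a ** x keeps the arithmetic cheap:
# log10 is monotonic, so the ordering is preserved, and while a value like
# 632382 ** 518061 has about three million digits, its log comparison is one float multiply.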
if __name__ == "__main__":
print(solution())
| 121 | 1 |
def solution(n: int = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
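# Closed forms used above: 1 + 2 + ... + n = n(n+1)/2 and 1^2 + ... + n^2 = n(n+1)(2n+1)/6.
# A brute-force check agrees for n = 10:
# sum(range(11)) ** 2 - sum(i * i for i in range(11)) == 2640.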
if __name__ == "__main__":
print(f"""{solution() = }""")
| 713 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
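# This is global mean binarization: a single threshold (the image mean) is applied to
# every pixel. Histogram-based methods such as Otsu's pick the threshold adaptively instead.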
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 38 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
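# Shape walkthrough with assumed toy sizes: given num_frames=8, a (16, 320, 32, 32)
# input is treated as 2 videos of 8 frames each; attention runs along the frame axis
# for every spatial location, and the output is reshaped back to (16, 320, 32, 32)
# so the residual add lines up with the input.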
| 135 | 0 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
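# Padding arithmetic: for old_height = 20 and size = 8, pad_height = (20 // 8 + 1) * 8 - 20 = 4,
# giving a padded height of 24 (a multiple of 8). Note that an exact multiple (e.g. 16) still
# gains a full extra block of `size`; that mirrors the formula as written above.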
| 715 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
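    # The tokenizer keeps the full multilingual SentencePiece model for segmentation but
    # exposes only the reduced monolingual vocab: ids are remapped through
    # fairseq_tokens_to_ids, and any piece missing from dict.txt falls back to <unk>
    # in _convert_token_to_id below.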
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
| 694 | 0 |
def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
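# The inner `_merge` generator pops from whichever list has the smaller head, then
# drains the leftovers, so merging two sorted halves costs O(len(left) + len(right)).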
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=',')
| 556 |
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
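# Heap's algorithm: after each recursive pass over the first k-1 elements, swap element
# k-1 with index i (k even) or index 0 (k odd). This visits all n! permutations, with
# exactly one swap between consecutive outputs.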
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 556 | 1 |
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
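# Worked example: find_mod_inverse(7, 26) == 15, since 7 * 15 = 105 = 4 * 26 + 1, i.e. 1 (mod 26).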
| 706 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
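# `make_linear_from_emb` is weight tying: the LM head becomes a bias-free Linear whose
# weight matrix *is* the shared embedding, so output logits are hidden_states @ E^T.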
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 475 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
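# Note on the processor below: prompt coordinates (points and boxes) arrive in
# original-image space and are rescaled by _normalize_and_convert to the image
# processor's longest-edge target size, so they line up with the resized pixel_values.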
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)
        if input_labels is not None:
            input_labels = np.array(input_labels)
        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords

    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
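

# Minimal usage sketch (illustrative; "facebook/sam-vit-base" is the public SAM base
# checkpoint, and `image` stands for any PIL image you load yourself):
#   from transformers import SamProcessor
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")
#   # -> BatchEncoding with "pixel_values", "original_sizes", and normalized "input_points"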
| 393 |
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
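

# Worked example: in the classic 3x3 grid below the cheapest top-left -> bottom-right
# path is 1 -> 3 -> 1 -> 1 -> 1, so min_path_sum returns 7.
#   min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])  # 7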
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 670 | 0 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
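

# Usage sketch (assumed wiring, mirroring how `Accelerator.save_state` drives these
# helpers; it presumes the accelerator was prepared with an FSDP plugin):
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt/")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt/")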
| 50 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 50 | 1 |
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'\[([^\]]+)\]')


def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
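

# Example: constants sort first, then classes, then functions, each group alphabetically
# with underscores ignored for ordering:
#   sort_objects_in_import('_import_structure["models"] = ["zebra_fn", "AlbertModel", "ALBERT_CONSTANT"]')
#   -> '_import_structure["models"] = ["ALBERT_CONSTANT", "AlbertModel", "zebra_fn"]'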
def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 504 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={'help': 'The output directory where the model will be written.'},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The encoder model checkpoint for weights initialization. '
                'Don\'t set if you want to train an encoder model from scratch.'
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The decoder model checkpoint for weights initialization. '
                'Don\'t set if you want to train a decoder model from scratch.'
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main()
| 504 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
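

# Example: a strictly diagonally dominant 2x2 system, 4x + y = 9 and x + 3y = 10
# (exact solution x = 17/11 ~ 1.5455, y = 31/11 ~ 2.8182):
#   coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#   constant = np.array([[9.0], [10.0]])
#   jacobi_iteration_method(coefficient, constant, init_val=[0.0, 0.0], iterations=25)
#   -> values close to [1.5455, 2.8182]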
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root lies between a and b only if the signs differ
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
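
    # Both calls below bracket the positive root of 10 - x * x,
    # so each prints a value close to sqrt(10) ~ 3.16.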
print(bisection(-2, 5))
print(bisection(0, 6))
| 360 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
RADIUS = 637_8137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in metres between two points on the Earth's surface."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
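
    # Rough example: one degree of latitude along a meridian,
    #   haversine_distance(0.0, 0.0, 1.0, 0.0)  # roughly 111_000 m (about 111 km)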
| 43 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    print("""Making key files...""" )
    make_key_files("""rsa""" , 1024 )
    print("""Key files generation successful.""" )


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
        print("""\nWARNING:""" )
        print(
            f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            """Use a different name or delete these files and re-run this program.""" )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(f'''{name}_pubkey.txt''' , """w""" ) as out_file:
        out_file.write(f'''{key_size},{public_key[0]},{public_key[1]}''' )
    print(f'''Writing private key to file {name}_privkey.txt...''' )
    with open(f'''{name}_privkey.txt''' , """w""" ) as out_file:
        out_file.write(f'''{key_size},{private_key[0]},{private_key[1]}''' )


def generate_key(key_size: int):
    print("""Generating prime p...""" )
    p = rabinMiller.generate_large_prime(key_size)
    print("""Generating prime q...""" )
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
    while True:
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break

    print("""Calculating d that is mod inverse of e...""" )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
if __name__ == "__main__":
main()
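
# Running this module writes two comma-separated key files next to it:
#   rsa_pubkey.txt  -> "<key_size>,<n>,<e>"
#   rsa_privkey.txt -> "<key_size>,<n>,<d>"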
| 635 | 0 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
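

# Usage sketch (hypothetical: requires a D4RL-style env plus pretrained value-function
# and UNet checkpoints wired into the pipeline; the names below are illustrative):
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   obs = env.reset()
#   action = pipeline(obs, planning_horizon=32)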
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    """configuration_pix2struct""": [
        """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Pix2StructConfig""",
        """Pix2StructTextConfig""",
        """Pix2StructVisionConfig""",
    ],
    """processing_pix2struct""": ["""Pix2StructProcessor"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""image_processing_pix2struct"""] = ["""Pix2StructImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_pix2struct"""] = [
        """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Pix2StructPreTrainedModel""",
        """Pix2StructForConditionalGeneration""",
        """Pix2StructVisionModel""",
        """Pix2StructTextModel""",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
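
# With the lazy module in place, `from transformers.models.pix2struct import Pix2StructProcessor`
# defers importing `processing_pix2struct` until first attribute access,
# which keeps the top-level `import transformers` cheap.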
| 87 | 1 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-canny""" , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )

        canny_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )

        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )

        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )

        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-openpose""" , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )

        pose_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )

        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )

        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )

        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 433 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = StableDiffusionLDMaDPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self : Tuple ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
lowerCAmelCase_ : Any = DDIMScheduler(
beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=lowerCAmelCase__ ,set_alpha_to_one=lowerCAmelCase__ ,)
torch.manual_seed(0 )
lowerCAmelCase_ : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=6 ,out_channels=6 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
lowerCAmelCase_ : Optional[int] = CLIPTextModel(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase_ : List[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : List[str]=0 ) -> Dict:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("mps" ):
lowerCAmelCase_ : Optional[int] = torch.manual_seed(lowerCAmelCase__ )
else:
lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : List[str] = self.get_dummy_components()
lowerCAmelCase_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Any = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ : Any = output.rgb, output.depth
lowerCAmelCase_ : Dict = rgb[0, -3:, -3:, -1]
lowerCAmelCase_ : Tuple = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase_ : Optional[Any] = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
lowerCAmelCase_ : Tuple = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.get_dummy_components()
lowerCAmelCase_ : List[str] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : str = 3 * [inputs["prompt"]]
# forward
lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = output.rgb, output.depth
lowerCAmelCase_ : str = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase_ : List[str] = depth_slice_a[0, -3:, -1]
lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = 3 * [inputs.pop("prompt" )]
lowerCAmelCase_ : str = ldmad_pipe.tokenizer(
lowerCAmelCase__ ,padding="max_length" ,max_length=ldmad_pipe.tokenizer.model_max_length ,truncation=lowerCAmelCase__ ,return_tensors="pt" ,)
lowerCAmelCase_ : Union[str, Any] = text_inputs["input_ids"].to(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = ldmad_pipe.text_encoder(lowerCAmelCase__ )[0]
lowerCAmelCase_ : Optional[int] = prompt_embeds
# forward
lowerCAmelCase_ : str = ldmad_pipe(**lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ : str = output.rgb, output.depth
lowerCAmelCase_ : Optional[Any] = rgb_slice_b[0, -3:, -3:, -1]
lowerCAmelCase_ : Tuple = depth_slice_b[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten() ).max() < 1e-4
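# The two forward passes above should be numerically identical: the second pass
# feeds precomputed prompt_embeds instead of the raw prompt string, exercising
# the text-encoder shortcut in the pipeline rather than a different code path.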
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Optional[int] = self.get_dummy_components()
lowerCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : Any = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = "french fries"
lowerCAmelCase_ : Optional[int] = ldmad_pipe(**lowerCAmelCase__ ,negative_prompt=lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = output.rgb, output.depth
lowerCAmelCase_ : Any = rgb[0, -3:, -3:, -1]
lowerCAmelCase_ : Tuple = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase_ : int = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
lowerCAmelCase_ : Union[str, Any] = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict="cpu" ,lowerCAmelCase__ : Union[str, Any]=torch.floataa ,lowerCAmelCase__ : List[str]=0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ : Optional[Any] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ,dtype=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
lowerCAmelCase_ : List[str] = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = self.get_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = ldmad_pipe(**lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ : Dict = output.rgb, output.depth
lowerCAmelCase_ : List[str] = rgb[0, -3:, -3:, -1].flatten()
lowerCAmelCase_ : Optional[int] = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12)
lowerCAmelCase_ : int = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
lowerCAmelCase_ : Optional[Any] = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict="cpu" ,lowerCAmelCase__ : List[str]=torch.floataa ,lowerCAmelCase__ : Optional[int]=0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ : Any = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ,dtype=lowerCAmelCase__ )
lowerCAmelCase_ : int = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : Dict ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = self.get_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ : Any = output.rgb, output.depth
lowerCAmelCase_ : Dict = 0.495_586
lowerCAmelCase_ : Optional[Any] = 0.33_795_515
lowerCAmelCase_ : Any = 112.48_518
lowerCAmelCase_ : List[Any] = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : int = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : str = self.get_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = ldmad_pipe(**lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = output.rgb, output.depth
lowerCAmelCase_ : List[str] = 0.4_194_127
lowerCAmelCase_ : List[str] = 0.35_375_586
lowerCAmelCase_ : str = 0.5_638_502
lowerCAmelCase_ : Optional[Any] = 0.34_686_103
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
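# Note on the 4c checkpoint exercised above: unlike the base ldm3d run earlier
# (depth asserted as (1, 512, 512)), the ldm3d-4c variant emits a trailing
# channel axis, hence the (1, 512, 512, 1) depth shape assertion.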
| 659 | 0 |
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> str:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not number >= 1:
raise ValueError(
'''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
_a = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCamelCase )
# print(out)
number += 1
out += " "
return out
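# Illustrative expected output for the helper above (parameter order follows
# the `number <= iterations` loop): starting at 1 and iterating to 15 yields
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz " — note the
# trailing space appended after every token.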
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCamelCase :Any = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase :List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCamelCase :List[str] = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
lowerCamelCase :Optional[Any] = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCamelCase :List[Any] = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCamelCase :int = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def __snake_case ( _UpperCamelCase ) -> Dict:
_a = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , _UpperCamelCase )
return [m.group(0 ) for m in matches]
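# Illustrative behaviour of the regex above: it breaks on lower->upper
# transitions and on an uppercase run followed by a lowercase letter, so
# "TFBertForSequenceClassification" splits into
# ["TF", "Bert", "For", "Sequence", "Classification"].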
def __snake_case ( ) -> Union[str, Any]:
_a = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_a = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_a = collections.defaultdict(_UpperCamelCase )
_a = collections.defaultdict(_UpperCamelCase )
_a = collections.defaultdict(_UpperCamelCase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(_UpperCamelCase ):
_a = None
if _re_tf_models.match(_UpperCamelCase ) is not None:
_a = tf_models
_a = _re_tf_models.match(_UpperCamelCase ).groups()[0]
elif _re_flax_models.match(_UpperCamelCase ) is not None:
_a = flax_models
_a = _re_flax_models.match(_UpperCamelCase ).groups()[0]
elif _re_pt_models.match(_UpperCamelCase ) is not None:
_a = pt_models
_a = _re_pt_models.match(_UpperCamelCase ).groups()[0]
if lookup_dict is not None:
while len(_UpperCamelCase ) > 0:
if attr_name in model_prefix_to_model_type:
_a = True
break
# Try again after removing the last word in the name
_a = ''''''.join(camel_case_split(_UpperCamelCase )[:-1] )
_a = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_a = list(_UpperCamelCase )
all_models.sort()
_a = {'''model_type''': all_models}
_a = [pt_models[t] for t in all_models]
_a = [tf_models[t] for t in all_models]
_a = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure every model type is paired with a preprocessor class
_a = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_a = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_a = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_a = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_a = '''AutoTokenizer'''
_a = [processors[t] for t in all_models]
return pd.DataFrame(_UpperCamelCase )
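# Sketch of one row of the frame returned above (values are illustrative):
# model_type="bert", pytorch=True, tensorflow=True, flax=True,
# processor="AutoTokenizer".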
def __snake_case ( _UpperCamelCase ) -> int:
_a = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_a = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
    _a = [auto_class, f"TF{auto_class}", f"Flax{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
# The type of pipeline may not exist in this framework
if not hasattr(_UpperCamelCase , _UpperCamelCase ):
continue
# First extract all model_names
_a = []
for name in getattr(_UpperCamelCase , _UpperCamelCase ).values():
if isinstance(_UpperCamelCase , _UpperCamelCase ):
model_names.append(_UpperCamelCase )
else:
model_names.extend(list(_UpperCamelCase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
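# An illustrative entry written into the table above:
# {"BertForSequenceClassification":
#  ("text-classification", "AutoModelForSequenceClassification")}.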
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> Tuple:
_a = get_frameworks_table()
_a = Dataset.from_pandas(_UpperCamelCase )
_a = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=_UpperCamelCase )
_a = Dataset.from_json(_UpperCamelCase )
_a = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(_UpperCamelCase ) )
}
_a = update_pipeline_and_auto_class_table(_UpperCamelCase )
    # Sort the model classes so nondeterministic ordering does not create spurious update commits.
_a = sorted(table.keys() )
_a = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
_a = Dataset.from_pandas(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(_UpperCamelCase , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(_UpperCamelCase , '''pipeline_tags.json''' ) )
if commit_sha is not None:
_a = (
f"Update with commit {commit_sha}\n\nSee: "
f"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
_a = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=_UpperCamelCase , repo_type='''dataset''' , token=_UpperCamelCase , commit_message=_UpperCamelCase , )
def __snake_case ( ) -> str:
_a = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_a = transformers_module.pipelines.SUPPORTED_TASKS
_a = []
for key in pipeline_tasks:
if key not in in_table:
_a = pipeline_tasks[key]['''pt''']
if isinstance(_UpperCamelCase , (list, tuple) ):
_a = model[0]
_a = model.__name__
if model not in in_table.values():
missing.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
_a = ''', '''.join(_UpperCamelCase )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
lowerCamelCase :str = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
lowerCamelCase :Dict = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 346 | 1 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
A = False
class __snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase):
def UpperCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : str = 'A painting of a squirrel eating a burger '
lowerCamelCase : Optional[int] = torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = pipe(
prompt=UpperCAmelCase_, generator=UpperCAmelCase_, guidance_scale=7.5, num_inference_steps=2, output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_ )
lowerCamelCase : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained(UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : List[str] = generator.manual_seed(0 )
lowerCamelCase : Union[str, Any] = pipe(
prompt=UpperCAmelCase_, generator=UpperCAmelCase_, guidance_scale=7.5, num_inference_steps=2, output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion', torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : List[str] = 'A painting of a squirrel eating a burger '
lowerCamelCase : List[str] = torch.manual_seed(0 )
lowerCamelCase : Any = pipe(
prompt=UpperCAmelCase_, generator=UpperCAmelCase_, guidance_scale=7.5, num_inference_steps=50, output_type='numpy' ).images
lowerCamelCase : Optional[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 320 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 508 | 0 |
def lowerCamelCase ( UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : int = 1000 , UpperCAmelCase_ : bool = True )-> int:
"""simple docstring"""
assert (
isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("""Invalid value for min_val or max_val (min_value < max_value)""" )
return min_val if option else max_val
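# Illustrative behaviour of the bound helper above: with min_val=10 and
# max_val=1000 it returns 10 when option is truthy and 1000 otherwise.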
def lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int )-> int:
"""simple docstring"""
return int((number_a + number_a) / 2 )
def lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int )-> None:
"""simple docstring"""
assert (
isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
    ), 'argument values must be of type "int"'
if lower > higher:
raise ValueError("""argument value for lower and higher must be(lower > higher)""" )
if not lower < to_guess < higher:
raise ValueError(
"""guess value must be within the range of lower and higher value""" )
def answer(UpperCAmelCase_ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("""started...""" )
a =lower
a =higher
a =[]
while True:
a =get_avg(UpperCAmelCase_ , UpperCAmelCase_ )
last_numbers.append(UpperCAmelCase_ )
if answer(UpperCAmelCase_ ) == "low":
a =number
elif answer(UpperCAmelCase_ ) == "high":
a =number
else:
break
print(F'''guess the number : {last_numbers[-1]}''' )
print(F'''details : {last_numbers!s}''' )
def lowerCamelCase ( )-> None:
"""simple docstring"""
a =int(input("""Enter lower value : """ ).strip() )
a =int(input("""Enter high value : """ ).strip() )
a =int(input("""Enter value to guess : """ ).strip() )
guess_the_number(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
| 321 |
from __future__ import annotations
from collections.abc import Callable
def lowerCamelCase ( UpperCAmelCase_ : Callable[[int | float], int | float] , UpperCAmelCase_ : int | float , UpperCAmelCase_ : int | float , UpperCAmelCase_ : int = 100 , )-> float:
"""simple docstring"""
a =x_start
a =fnc(UpperCAmelCase_ )
a =0.0
for _ in range(UpperCAmelCase_ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
a =(x_end - x_start) / steps + xa
a =fnc(UpperCAmelCase_ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
a =xa
a =fxa
return area
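# Worked check for the rule above: each segment contributes the trapezoid area
# (f(x1) + f(x2)) * (x2 - x1) / 2. For f(x) = x^3 + x^2 on [-5, 5] the exact
# integral is [x^4/4 + x^3/3] evaluated at the bounds = 250/3 ~ 83.33, which
# the approximation approaches as `steps` grows.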
if __name__ == "__main__":
def lowerCamelCase ( UpperCAmelCase_ : List[str] )-> Optional[Any]:
"""simple docstring"""
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
_lowerCamelCase = 10
while i <= 100000:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
| 321 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase : Optional[Any] = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 649 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase : str = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 649 | 1 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _UpperCAmelCase :
def __init__( self :Dict ):
A = ""
A = ""
A = []
A = 0
A = 2_56
A = 0
A = 0
A = 0
A = 0
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple ):
A = cva.imread(__UpperCamelCase , 0 )
A = copy.deepcopy(self.img )
A, A, A = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label="x" )
A = np.sum(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
A = x[i] / self.k
self.sk += prk
A = (self.L - 1) * self.sk
A = last % 1
A = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__UpperCamelCase )
A = int(np.ma.count(self.img ) / self.img[1].size )
A = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
A = self.img[j][i]
if num != self.last_list[num]:
A = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def lowerCamelCase ( self :List[str] ):
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def lowerCamelCase ( self :List[Any] ):
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
    _snake_case : List[str] = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
_snake_case : Dict = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 524 |
"""simple docstring"""
def A__ ( UpperCamelCase , UpperCamelCase ):
if mass < 0:
raise ValueError("The mass of a body cannot be negative" )
return 0.5 * mass * abs(UpperCamelCase ) * abs(UpperCamelCase )
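# Sanity check for the formula above: mass = 10 kg moving at |v| = 10 m/s
# gives 0.5 * 10 * 10 * 10 = 500 J.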
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 524 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str:
a__ : int = {}
if train_file is not None:
a__ : int = [train_file]
if eval_file is not None:
a__ : Union[str, Any] = [eval_file]
if test_file is not None:
a__ : str = [test_file]
a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a )
a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() )
a__ : str = features_name.pop(__a )
a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) )
a__ : str = {label: i for i, label in enumerate(__a )}
a__ : Tuple = tokenizer.model_input_names
a__ : List[str] = {}
if len(__a ) == 1:
for k in files.keys():
a__ : Optional[Any] = ds[k].map(
lambda __a : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , )
elif len(__a ) == 2:
for k in files.keys():
a__ : Dict = ds[k].map(
lambda __a : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
a__ : str = {k: v for k, v in ex.items() if k in input_names}
a__ : str = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
a__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
a__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names}
a__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
a__ : Optional[Any] = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
a__ : Union[str, Any] = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
a__ : Union[str, Any] = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
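# Shape sketch for the datasets returned above (illustrative element): each is
# a tf.data.Dataset of ({"input_ids": [...], "attention_mask": [...]}, label_id)
# pairs, with the cardinality pinned so tf.data knows the length of each split.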
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class A__ :
"""simple docstring"""
_lowercase = field(metadata={'help': 'Which column contains the label'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the training file'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} )
_lowercase = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_lowercase = field(
default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A__ :
"""simple docstring"""
_lowercase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_lowercase = field(
default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowercase = field(
default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def UpperCamelCase_ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
a__, a__, a__ : str = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a__ : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a__, a__, a__, a__ : Optional[Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
a__ : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
a__ : Any = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , )
def compute_metrics(__a ) -> Dict:
a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
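# compute_metrics above reduces each logits row to its argmax class and reports
# plain accuracy; e.g. argmax predictions [1, 0, 1] against labels [1, 1, 1]
# give {"acc": 2/3}.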
# Initialize our Trainer
a__ : Dict = TFTrainer(
model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a__ : Optional[Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a__ : Dict = trainer.evaluate()
a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(__a , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(__a )
return results
if __name__ == "__main__":
main()
| 37 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
if n == 1 or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return 0
elif n == 2:
return 1
else:
_a : Dict =[0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
_a : Union[str, Any] =0
_a : Optional[Any] =2
while digits < n:
index += 1
_a : Optional[int] =len(str(fibonacci(_UpperCAmelCase ) ) )
return index
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 1000 ) -> int:
return fibonacci_digits_index(_UpperCAmelCase )
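# Worked example for the helpers above: under this code's indexing (position 1
# holds 0 and position 2 holds 1), the first three-digit term, 144, appears at
# position 13, so the search returns 13 for n = 3.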
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 694 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class UpperCAmelCase__ ( _snake_case ):
"""simple docstring"""
A : torch.FloatTensor
class UpperCAmelCase__ ( _snake_case , _snake_case ):
"""simple docstring"""
@register_to_config
def __init__(self , _a = 16 , _a = 88 , _a = None , _a = None , _a = 1 , _a = 0.0 , _a = 32 , _a = None , _a = False , _a = None , _a = "geglu" , _a = True , _a = True , ) -> Optional[int]:
super().__init__()
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : Union[str, Any] = attention_head_dim
lowercase_ : int = num_attention_heads * attention_head_dim
lowercase_ : Tuple = in_channels
lowercase_ : List[Any] = torch.nn.GroupNorm(num_groups=_a , num_channels=_a , eps=1e-6 , affine=_a )
lowercase_ : str = nn.Linear(_a , _a )
# 3. Define transformers blocks
lowercase_ : Any = nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , cross_attention_dim=_a , activation_fn=_a , attention_bias=_a , double_self_attention=_a , norm_elementwise_affine=_a , )
for d in range(_a )
] )
lowercase_ : Optional[int] = nn.Linear(_a , _a )
def _lowerCamelCase (self , _a , _a=None , _a=None , _a=None , _a=1 , _a=None , _a = True , ) -> Union[str, Any]:
lowercase_ : List[Any] = hidden_states.shape
lowercase_ : List[str] = batch_frames // num_frames
lowercase_ : Optional[int] = hidden_states
lowercase_ : List[str] = hidden_states[None, :].reshape(_a , _a , _a , _a , _a )
lowercase_ : Optional[int] = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowercase_ : List[str] = self.norm(_a )
lowercase_ : List[Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _a , _a )
lowercase_ : Dict = self.proj_in(_a )
# 2. Blocks
for block in self.transformer_blocks:
lowercase_ : Optional[int] = block(
_a , encoder_hidden_states=_a , timestep=_a , cross_attention_kwargs=_a , class_labels=_a , )
# 3. Output
lowercase_ : List[Any] = self.proj_out(_a )
lowercase_ : Any = (
hidden_states[None, None, :]
.reshape(_a , _a , _a , _a , _a )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowercase_ : int = hidden_states.reshape(_a , _a , _a , _a )
lowercase_ : List[str] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=_a )
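# Shape walkthrough for the forward pass above: an input of
# (batch*frames, C, H, W) is folded to (batch, frames, C, H, W), permuted and
# flattened so self-attention runs along the frame axis independently at each
# spatial location, then unfolded back to (batch*frames, C, H, W) before the
# residual addition.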
| 720 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase_ : List[Any] = 128
elif "12-12" in model_name:
lowercase_ : Tuple = 12
lowercase_ : List[Any] = 12
elif "14-14" in model_name:
lowercase_ : List[str] = 14
lowercase_ : Optional[Any] = 14
elif "16-16" in model_name:
lowercase_ : Union[str, Any] = 16
lowercase_ : List[str] = 16
else:
raise ValueError('Model not supported' )
lowercase_ : Optional[Any] = 'huggingface/label-files'
if "speech-commands" in model_name:
lowercase_ : List[str] = 35
lowercase_ : int = 'speech-commands-v2-id2label.json'
else:
lowercase_ : Union[str, Any] = 527
lowercase_ : int = 'audioset-id2label.json'
lowercase_ : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
lowercase_ : Union[str, Any] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowercase_ : Optional[int] = idalabel
lowercase_ : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
if "module.v" in name:
lowercase_ : Dict = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
lowercase_ : Optional[Any] = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
lowercase_ : Any = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
lowercase_ : List[str] = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowercase_ : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
lowercase_ : Optional[Any] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowercase_ : Optional[int] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase_ : Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase_ : int = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase_ : Optional[int] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase_ : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase_ : int = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase_ : int = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
lowercase_ : Dict = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
lowercase_ : List[Any] = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
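# Illustrative rename produced by the chain above:
# "module.v.blocks.0.attn.proj.weight"
# -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight".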
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for key in orig_state_dict.copy().keys():
lowercase_ : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
lowercase_ : List[str] = key.split('.' )
lowercase_ : int = int(key_split[3] )
lowercase_ : Tuple = config.hidden_size
if "weight" in key:
lowercase_ : Tuple = val[:dim, :]
lowercase_ : Union[str, Any] = val[dim : dim * 2, :]
lowercase_ : Optional[int] = val[-dim:, :]
else:
lowercase_ : Optional[Any] = val[:dim]
lowercase_ : Any = val[dim : dim * 2]
lowercase_ : Tuple = val[-dim:]
else:
lowercase_ : Optional[Any] = val
return orig_state_dict
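# The qkv handling above assumes timm-style fused attention weights: a single
# (3*dim, dim) "qkv" matrix is sliced row-wise into query [:dim], key
# [dim : 2*dim] and value [-dim:] so each lands in its own HF projection.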
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[Any] = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
lowercase_ : Dict = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE_ )
lowercase_ : Optional[int] = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
lowercase_ : Dict = model_name_to_url[model_name]
lowercase_ : Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE_ )
# rename some keys
lowercase_ : str = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load 🤗 model
lowercase_ : Optional[Any] = ASTForAudioClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowercase_ : Tuple = -4.267_7393 if 'speech-commands' not in model_name else -6.84_5978
lowercase_ : str = 4.568_9974 if 'speech-commands' not in model_name else 5.565_4526
lowercase_ : str = 1_024 if 'speech-commands' not in model_name else 128
lowercase_ : Dict = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
if "speech-commands" in model_name:
lowercase_ : Optional[Any] = load_dataset('speech_commands' , 'v0.02' , split='validation' )
lowercase_ : Any = dataset[0]['audio']['array']
else:
lowercase_ : Any = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
lowercase_ ,lowercase_ : Union[str, Any] = torchaudio.load(SCREAMING_SNAKE_CASE_ )
lowercase_ : str = waveform.squeeze().numpy()
lowercase_ : str = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=16_000 , return_tensors='pt' )
# forward pass
lowercase_ : Tuple = model(**SCREAMING_SNAKE_CASE_ )
lowercase_ : Tuple = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase_ : int = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase_ : Optional[int] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase_ : Optional[Any] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase_ : List[str] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase_ : List[str] = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase_ : Any = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase_ : List[str] = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase_ : Optional[Any] = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 438 | 0 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[list[str]] , SCREAMING_SNAKE_CASE__ : int , ):
"""simple docstring"""
snake_case_ : Any = len(SCREAMING_SNAKE_CASE__ )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(SCREAMING_SNAKE_CASE__ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
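# Quick check of the diagonal bookkeeping above: a queen at (row=1, col=3)
# owns right-diagonal 1 - 3 = -2 and left-diagonal 1 + 3 = 4, so any later
# (row, col) with row - col == -2 or row + col == 4 collides and is skipped.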
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : list[list[str]] = []
depth_first_search([] , [] , [] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Print all the boards
for board in boards:
for column in board:
print(SCREAMING_SNAKE_CASE__ )
print("""""" )
print(len(SCREAMING_SNAKE_CASE__ ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 480 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : List[str] = CpmAntTokenizer
_A : str = False
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : Optional[int] = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __UpperCamelCase (self ):
snake_case_ : Dict = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
snake_case_ : Any = """今天天气真好!"""
snake_case_ : str = ["""今天""", """天气""", """真""", """好""", """!"""]
snake_case_ : Dict = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : Optional[int] = """今天天气真好!"""
snake_case_ : Dict = [tokenizer.bos_token] + tokens
snake_case_ : int = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 480 | 1 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
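# Why pad_to_multiple_of in collate_fn is 8 (fp16/bf16) or 16 (fp8): tensor-core
# kernels are fastest when the padded sequence length is a round multiple. A tiny,
# self-contained illustration of the resulting lengths (names local to this sketch):
def _padded_length(longest: int, multiple):
    if multiple is None:
        return longest
    return -(-longest // multiple) * multiple  # ceiling-divide, then scale back up

assert _padded_length(77, None) == 77  # full precision: pad only to the longest sequence
assert _padded_length(77, 8) == 80     # fp16/bf16: round up to a multiple of 8
assert _padded_length(77, 16) == 80    # fp8: round up to a multiple of 16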
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
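# The gradient-accumulation branch in training_function keeps each forward pass at
# MAX_GPU_BATCH_SIZE while preserving the requested effective batch size. A small
# illustration of the arithmetic (a sketch; names are local to this snippet):
def _split_batch(requested: int, max_gpu_batch_size: int = 16):
    if requested > max_gpu_batch_size:
        return max_gpu_batch_size, requested // max_gpu_batch_size
    return requested, 1

assert _split_batch(64) == (16, 4)  # 4 micro-batches of 16 -> effective batch of 64
assert _split_batch(16) == (16, 1)  # small enough already: no accumulation needed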
| 707 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class WavaVecaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WavaVecaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = WavaVecaConfig.from_pretrained(model_id)
            feat_extract = WavaVecaFeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
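# _check_zero_mean_unit_variance above encodes what do_normalize=True promises for
# Wav2Vec2 inputs. A standalone sketch of that normalization in plain NumPy (this
# mirrors the property being tested, not the library's internal implementation):
def _zero_mean_unit_var(x, eps=1e-7):
    return (x - x.mean()) / np.sqrt(x.var() + eps)

_wave = np.random.rand(800).astype(np.float32)
_norm = _zero_mean_unit_var(_wave)
assert abs(_norm.mean()) < 1e-3 and abs(_norm.var() - 1) < 1e-3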
| 517 | 0 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row, column):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row, position=None):
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
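# A quick usage sketch of the Matrix class above, with results worked out by hand.
# Note that scalar multiplication truncates entries to int (see __mul__), so
# inverse() returns integer-truncated entries by design.
_m = Matrix([[1, 2], [3, 4]])
assert _m.determinant() == -2
assert _m.inverse().rows == [[-2, 1], [1, 0]]  # adjugate * (1/det), entries truncated
assert (_m + Matrix([[1, 0], [0, 1]])).rows == [[2, 2], [3, 5]]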
| 431 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling_from_config(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
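# For reference, the rope_scaling knob exercised by the test above is just a config
# field; a minimal construction sketch with tiny, illustrative sizes (not a real
# training configuration):
def _tiny_scaled_model():
    config = OpenLlamaConfig(
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        rope_scaling={"type": "linear", "factor": 10.0},  # or {"type": "dynamic", "factor": ...}
    )
    return OpenLlamaModel(config)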
| 491 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
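# The conversion can also be driven programmatically instead of via argparse; the
# paths below are placeholders for illustration only (shown commented out so that
# importing this module never triggers a conversion):
# convert_tf_checkpoint_to_pytorch(
#     tf_checkpoint_path="bert_model.ckpt",   # TF checkpoint prefix
#     bert_config_file="bert_config.json",    # architecture description
#     pytorch_dump_path="pytorch_model.bin",  # output state dict
# )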
| 706 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 |
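# The _LazyModule indirection used in the block above defers importing torch-heavy
# submodules until an attribute is first accessed. A stripped-down illustration of
# the same idea (an assumption-laden sketch, not the transformers implementation):
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value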
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 691 | 1 |
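# The four-channel test class above relies on do_convert_rgb collapsing RGBA input
# to three channels before normalization. A quick standalone illustration of that
# conversion with PIL (this mirrors the behavior under test, not the processor's code):
import numpy as np
from PIL import Image

_rgba = Image.fromarray(np.random.randint(255, size=(18, 18, 4), dtype=np.uint8), mode="RGBA")
_rgb = _rgba.convert("RGB")  # conceptually what do_convert_rgb=True does
assert len(_rgb.getbands()) == 3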
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
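# Outside the test harness, the scheduler drives a denoising loop with the same
# three calls the tests exercise: construct, set_timesteps, then step per timestep.
# A minimal sketch with a stand-in "model" (a real pipeline would use a trained
# UNet's noise prediction in place of the zeros below):
def _tiny_denoise_demo():
    scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)  # stand-in latent
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # stand-in for unet(sample, t).sample
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample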
| 177 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
"""simple docstring"""
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_parquet_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
"""simple docstring"""
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = features.copy() if features else default_expected_features
A__ = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ = ParquetDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_parquet_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read()
_check_parquet_dataset(lowercase_ , lowercase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 177 | 1 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string that can be replayed, wrapped to `max_width` chars."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 102 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
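# Every tool must declare its inputs/outputs using only these type names; the checks in
# ToolTesterMixin below enforce this.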
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
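# Illustration (not executed by the tests themselves): create_inputs(["text", "image"])
# yields a string plus a 512x512 PIL image, and output_types() maps those values back
# to ["text", "image"].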
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 36 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
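# i.e. per the deprecation message above, downstream code should now use:
#
#   from diffusers import StableDiffusionControlNetPipeline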
| 314 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            feature_extractor = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(feature_extractor, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 314 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
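# e.g. copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, [0, 6, 11])
# copies the teacher's first, middle and last encoder layers into a 3-layer student.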
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
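# For example, LAYERS_TO_SUPERVISE[12][3] == [3, 7, 11]: when supervising a 3-layer student
# against a 12-layer teacher, each student layer is matched against one of those teacher layers.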
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student by copying alternating layers from a teacher, and save it to save_path.

    Args:
        teacher: str or PreTrainedModel; if str, AutoModelForSeq2SeqLM.from_pretrained(teacher) is called first.
        save_path: where to save the student.
        e: how many encoder layers the student should have (default: copy all of the teacher's).
        d: how many decoder layers the student should have (default: copy all of the teacher's).
        copy_first_teacher_layers: don't copy alternating layers, just the first e/d.
        **extra_config_kwargs: extra kwargs to pass to the student's config.

    Returns:
        student, e_layers_to_copy, d_layers_to_copy
    """
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key

    if copy_first_teacher_layers:  # Our copying is done. We just log and save.
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For T5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about the copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy
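# Example invocation (illustrative only; downloads the named checkpoint when actually run):
#
#   student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student-6-3", e=6, d=3
#   )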
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 660 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository"""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
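# Minimal usage sketch (assumes `repo_info` was fetched beforehand, e.g. via
# `huggingface_hub.HfApi().dataset_info(...)`; "data/train.txt" is a hypothetical file):
#
#   fs = HfFileSystem(repo_info=repo_info)
#   print(fs.ls(""))                 # list top-level repo entries
#   with fs.open("data/train.txt") as f:
#       print(f.readline())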
| 660 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1_006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
    def test_tokenizer_integration(self):
# fmt: off
snake_case__ : Any = {"""input_ids""": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case__, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511"
        )
| 301 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 301 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize
lowercase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowercase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowercase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        # newer NLTK releases expect pre-tokenized input
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 581 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 707 |
def abbr(a: str, b: str) -> bool:
    """
    Return True if string `a` can be turned into `b` by capitalizing some of its
    lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: can the first i chars of `a` be reduced to the first j chars of `b`?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 416 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
SCREAMING_SNAKE_CASE__ : List[str] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
SCREAMING_SNAKE_CASE__ : Optional[int] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
SCREAMING_SNAKE_CASE__ : Any = BeautifulSoup(res.text, "html.parser")
SCREAMING_SNAKE_CASE__ : List[str] = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
| 298 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
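

# A minimal sketch of how the ONNX config above is consumed; the checkpoint
# name is an assumption for illustration and not part of this module.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = LongformerConfig()
    onnx_config = LongformerOnnxConfig(config)
    tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")  # assumed checkpoint
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=16, framework=TensorType.PYTORCH)
    print(sorted(dummy.keys()))  # ['attention_mask', 'global_attention_mask', 'input_ids']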
| 298 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = ["""PoolFormerFeatureExtractor"""]
lowerCAmelCase : List[Any] = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
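
# A short sketch of what the lazy pattern above buys (module path is an
# assumption; adjust to wherever this __init__ is installed):
#
#     from transformers.models.poolformer import PoolFormerConfig  # cheap, config only
#     from transformers.models.poolformer import PoolFormerModel   # first access pulls in torch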
| 630 |
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    """
    Calculate the Gregorian Easter date for a given year using Gauss's method.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
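    # A quick sanity sketch using the function directly (Gregorian/Western
    # calendar only): Easter 2000 fell on April 23.
    assert gauss_easter(2000) == datetime(2000, 4, 23)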
| 630 | 1 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
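

# A quick sketch of rename_key on a representative original checkpoint key
# (the sample key is illustrative, not taken from a real state dict):
#
#     cfg = get_config("https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth")
#     rename_key("layers.0.residual_group.blocks.0.attn.proj.weight", cfg)
#     # -> "swin2sr.encoder.stages.0.layers.0.attention.output.dense.weight"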
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split fused qkv projections into query/key/value; the target key
            # layout follows the rename_key conventions above
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 507 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
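

# A minimal usage sketch (values chosen for illustration): the backbone mixin
# derives the stage names from `depths`, which out_features/out_indices are
# validated against.
if __name__ == "__main__":
    cfg = ResNetConfig(depths=[2, 2], hidden_sizes=[64, 128], layer_type="basic")
    print(cfg.stage_names)  # ['stem', 'stage1', 'stage2']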
| 507 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_new_tokens = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 711 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
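

# A compact denoising-loop sketch exercising the scheduler API above;
# `model_output` is a zero stand-in for a real UNet call (an assumption made
# purely for illustration).
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(model_output, int(t), sample).prev_sample
    print(sample.shape)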
| 478 | 0 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """
    Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single
    processor class.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
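

# A minimal usage sketch (the checkpoint name is an assumption, not part of
# this module): tokenize a text prompt; generated audio would be stripped of
# padding via batch_decode(audio=..., padding_mask=...).
if __name__ == "__main__":
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("facebook/musicgen-small")  # assumed checkpoint
    inputs = processor(text=["80s pop track with heavy drums"], padding=True, return_tensors="pt")
    print(list(inputs.keys()))  # ['input_ids', 'attention_mask']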
| 594 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        pass
| 173 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top-k / top-p filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        expected_non_inf_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        expected_non_inf_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, expected_non_inf_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, expected_non_inf_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2
        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]

            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]

            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
    def test_saved_model_with_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # a fake model whose call signature accepts an extra "foo" kwarg
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 707 | from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: zero-weight edges go to the front of the deque, unit-weight
        # edges to the back, so the deque stays sorted by distance.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
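
    # A small usage sketch of the 0-1 BFS above: the zero-weight edge 0 -> 2
    # makes the best 0 -> 3 distance 1 instead of 2.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 3, 1)
    g.add_edge(0, 2, 0)
    g.add_edge(2, 3, 1)
    print(g.get_shortest_path(0, 3))  # 1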
| 584 | 0 |
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path , config_name , flax_dump_folder_path ):
    '''simple docstring'''
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
UpperCAmelCase__ ="SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
UpperCAmelCase__ ="LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase__ ="TransientGlobalSelfAttention"
else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']." )
# Encoder
for layer_index in range(config.num_layers ):
UpperCAmelCase__ =F"""layers_{str(A )}"""
# Self-Attention
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
UpperCAmelCase__ =tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
UpperCAmelCase__ =flax_model.params["encoder"]["block"][str(A )]["layer"]
UpperCAmelCase__ =tax_attention_key
UpperCAmelCase__ =tax_attention_out
UpperCAmelCase__ =tax_attention_query
UpperCAmelCase__ =tax_attention_value
UpperCAmelCase__ =tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase__ =tax_global_layer_norm
if split_mlp_wi:
UpperCAmelCase__ =tax_mlp_wi_a
UpperCAmelCase__ =tax_mlp_wi_a
else:
UpperCAmelCase__ =tax_mlp_wi
UpperCAmelCase__ =tax_mlp_wo
UpperCAmelCase__ =tax_mlp_layer_norm
UpperCAmelCase__ =flax_model_encoder_layer_block
# Only for layer 0:
UpperCAmelCase__ =tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
UpperCAmelCase__ =tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase__ =tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
UpperCAmelCase__ =tax_encoder_global_rel_embedding
# Assigning
UpperCAmelCase__ =tax_model["target"]["encoder"]["encoder_norm"]["scale"]
UpperCAmelCase__ =tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
UpperCAmelCase__ =F"""layers_{str(A )}"""
# Self-Attention
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
UpperCAmelCase__ =tax_enc_dec_attention_module["key"]["kernel"]
UpperCAmelCase__ =tax_enc_dec_attention_module["out"]["kernel"]
UpperCAmelCase__ =tax_enc_dec_attention_module["query"]["kernel"]
UpperCAmelCase__ =tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
UpperCAmelCase__ =tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
UpperCAmelCase__ =flax_model.params["decoder"]["block"][str(A )]["layer"]
UpperCAmelCase__ =tax_attention_key
UpperCAmelCase__ =tax_attention_out
UpperCAmelCase__ =tax_attention_query
UpperCAmelCase__ =tax_attention_value
UpperCAmelCase__ =tax_pre_attention_layer_norm
UpperCAmelCase__ =tax_enc_dec_attention_key
UpperCAmelCase__ =tax_enc_dec_attention_out
UpperCAmelCase__ =tax_enc_dec_attention_query
UpperCAmelCase__ =tax_enc_dec_attention_value
UpperCAmelCase__ =tax_cross_layer_norm
if split_mlp_wi:
UpperCAmelCase__ =tax_mlp_wi_a
UpperCAmelCase__ =tax_mlp_wi_a
else:
UpperCAmelCase__ =tax_mlp_wi
UpperCAmelCase__ =tax_mlp_wo
        UpperCAmelCase__ =tax_mlp_layer_norm
UpperCAmelCase__ =flax_model_decoder_layer_block
# Decoder Normalization
UpperCAmelCase__ =tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    UpperCAmelCase__ =tax_decoder_norm
# Only for layer 0:
UpperCAmelCase__ =tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
UpperCAmelCase__ =tax_decoder_rel_embedding
# Token Embeddings
UpperCAmelCase__ =tax_model["target"]["token_embedder"]["embedding"]
    UpperCAmelCase__ =tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCAmelCase__ =tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(A )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
UpperCamelCase_ = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
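# Hedged usage note (added; the script filename below is an assumption, not part of
# the original file). With the argument names defined above, a typical invocation is:
#
#   python convert_longt5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /path/to/flax_dump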
| 625 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 'donut-swin'
__UpperCamelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self, A_=224, A_=4, A_=3, A_=96, A_=[2, 2, 6, 2], A_=[3, 6, 12, 24], A_=7, A_=4.0, A_=True, A_=0.0, A_=0.0, A_=0.1, A_="gelu", A_=False, A_=0.02, A_=1E-5, **A_, ) -> List[str]:
super().__init__(**A_ )
UpperCAmelCase__ =image_size
UpperCAmelCase__ =patch_size
UpperCAmelCase__ =num_channels
UpperCAmelCase__ =embed_dim
UpperCAmelCase__ =depths
UpperCAmelCase__ =len(A_ )
UpperCAmelCase__ =num_heads
UpperCAmelCase__ =window_size
UpperCAmelCase__ =mlp_ratio
UpperCAmelCase__ =qkv_bias
UpperCAmelCase__ =hidden_dropout_prob
UpperCAmelCase__ =attention_probs_dropout_prob
UpperCAmelCase__ =drop_path_rate
UpperCAmelCase__ =hidden_act
UpperCAmelCase__ =use_absolute_embeddings
UpperCAmelCase__ =layer_norm_eps
UpperCAmelCase__ =initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase__ =int(embed_dim * 2 ** (len(A_ ) - 1) )
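# Added, hedged sketch (assuming the upstream name `DonutSwinConfig` for the class
# above): with the defaults embed_dim=96 and depths=[2, 2, 6, 2] (4 stages),
# hidden_size resolves to 96 * 2 ** 3 = 768, the channel width after the last stage,
# which is what lets the Swin backbone plug into VisionEncoderDecoderModel.
#
#   config = DonutSwinConfig()
#   assert config.hidden_size == 768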
| 625 | 1 |
'''simple docstring'''
def UpperCamelCase_ ( number : int ):
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
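# Added clarification: this is the classic *automorphic number* check -- a number is
# automorphic when its square ends with the number itself, verified one trailing
# digit at a time above. For example:
#   5  -> 5 ** 2 = 25    (ends in 5)          -> True
#   76 -> 76 ** 2 = 5776 (ends in 76)         -> True
#   7  -> 7 ** 2 = 49    (does not end in 7)  -> False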
if __name__ == "__main__":
import doctest
doctest.testmod()
| 398 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : int=13 , lowerCamelCase : Tuple=7 , lowerCamelCase : List[Any]=True , lowerCamelCase : List[str]=True , lowerCamelCase : Any=False , lowerCamelCase : str=True , lowerCamelCase : int=99 , lowerCamelCase : str=32 , lowerCamelCase : Dict=5 , lowerCamelCase : List[Any]=4 , lowerCamelCase : List[Any]=37 , lowerCamelCase : str="gelu" , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : str=5_12 , lowerCamelCase : Optional[int]=16 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : Tuple=3 , lowerCamelCase : Optional[int]=4 , lowerCamelCase : List[str]=None , ) -> int:
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Tuple = batch_size
lowerCAmelCase_ : Union[str, Any] = seq_length
lowerCAmelCase_ : Union[str, Any] = is_training
lowerCAmelCase_ : Union[str, Any] = use_input_mask
lowerCAmelCase_ : Dict = use_token_type_ids
lowerCAmelCase_ : Tuple = use_labels
lowerCAmelCase_ : Optional[int] = vocab_size
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : Union[str, Any] = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Optional[int] = intermediate_size
lowerCAmelCase_ : Tuple = hidden_act
lowerCAmelCase_ : Dict = hidden_dropout_prob
lowerCAmelCase_ : Tuple = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : Dict = type_vocab_size
lowerCAmelCase_ : Dict = type_sequence_label_size
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : Union[str, Any] = num_labels
lowerCAmelCase_ : Optional[Any] = num_choices
lowerCAmelCase_ : Optional[Any] = scope
def __lowercase ( self : str ) -> str:
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : Optional[Any] = None
if self.use_input_mask:
lowerCAmelCase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : Optional[Any] = None
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : int = None
if self.use_labels:
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : Union[str, Any] ) -> str:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __lowercase ( self : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : Dict ) -> Tuple:
lowerCAmelCase_ : List[Any] = DistilBertModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : str = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Dict , lowerCamelCase : Dict , lowerCamelCase : Optional[int] ) -> List[Any]:
lowerCAmelCase_ : Tuple = DistilBertForMaskedLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : Tuple = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : str , lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] ) -> Optional[int]:
lowerCAmelCase_ : Optional[int] = DistilBertForQuestionAnswering(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(
lowerCamelCase , attention_mask=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ) -> Any:
lowerCAmelCase_ : int = self.num_labels
lowerCAmelCase_ : List[Any] = DistilBertForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : str = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : Optional[int] ) -> int:
lowerCAmelCase_ : Tuple = self.num_labels
lowerCAmelCase_ : Optional[int] = DistilBertForTokenClassification(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : Dict = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : str ) -> Union[str, Any]:
lowerCAmelCase_ : List[Any] = self.num_choices
lowerCAmelCase_ : Optional[Any] = DistilBertForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : str = model(
lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase ( self : Optional[int] ) -> Tuple:
lowerCAmelCase_ : str = self.prepare_config_and_inputs()
((lowerCAmelCase_), (lowerCAmelCase_), (lowerCAmelCase_), (lowerCAmelCase_), (lowerCAmelCase_), (lowerCAmelCase_)) : Dict = config_and_inputs
lowerCAmelCase_ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,unittest.TestCase):
"""simple docstring"""
lowercase = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
lowercase = True
lowercase = True
lowercase = True
def __lowercase ( self : str ) -> List[str]:
lowerCAmelCase_ : List[str] = DistilBertModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=lowerCamelCase , dim=37 )
def __lowercase ( self : int ) -> str:
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCamelCase )
def __lowercase ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCamelCase )
def __lowercase ( self : Optional[Any] ) -> int:
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCamelCase )
def __lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCamelCase )
def __lowercase ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCamelCase )
def __lowercase ( self : Optional[int] ) -> Any:
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCamelCase )
@slow
def __lowercase ( self : Tuple ) -> Optional[int]:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Any = DistilBertModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@slow
@require_torch_gpu
def __lowercase ( self : Tuple ) -> List[Any]:
lowerCAmelCase_, lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowerCAmelCase_ : Union[str, Any] = True
lowerCAmelCase_ : List[str] = model_class(config=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = torch.jit.trace(
lowerCamelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCamelCase , os.path.join(lowerCamelCase , """traced_model.pt""" ) )
lowerCAmelCase_ : List[str] = torch.jit.load(os.path.join(lowerCamelCase , """traced_model.pt""" ) , map_location=lowerCamelCase )
loaded(inputs_dict["""input_ids"""].to(lowerCamelCase ) , inputs_dict["""attention_mask"""].to(lowerCamelCase ) )
@require_torch
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@slow
def __lowercase ( self : str ) -> str:
lowerCAmelCase_ : int = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
lowerCAmelCase_ : str = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
lowerCAmelCase_ : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
lowerCAmelCase_ : Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , lowerCamelCase )
lowerCAmelCase_ : Optional[int] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase , atol=1E-4 ) )
| 398 | 1 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'bertabs'
def __init__( self , lowercase=30_522 , lowercase=512 , lowercase=6 , lowercase=512 , lowercase=8 , lowercase=512 , lowercase=0.2 , lowercase=6 , lowercase=768 , lowercase=8 , lowercase=2_048 , lowercase=0.2 , **lowercase , ) -> Optional[Any]:
super().__init__(**lowercase )
lowerCAmelCase = vocab_size
lowerCAmelCase = max_pos
lowerCAmelCase = enc_layers
lowerCAmelCase = enc_hidden_size
lowerCAmelCase = enc_heads
lowerCAmelCase = enc_ff_size
lowerCAmelCase = enc_dropout
lowerCAmelCase = dec_layers
lowerCAmelCase = dec_hidden_size
lowerCAmelCase = dec_heads
lowerCAmelCase = dec_ff_size
lowerCAmelCase = dec_dropout
| 532 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
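# Added, hedged note: at runtime the module body above never imports torch/tf/flax
# eagerly -- `_LazyModule` defers each submodule import until first attribute access,
# while the TYPE_CHECKING branch keeps static type checkers and IDEs accurate.
#
#   import transformers.models.roformer as roformer   # cheap: nothing heavy imported yet
#   roformer.RoFormerConfig                           # first access triggers the real import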
| 532 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CycleDiffusionPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
SCREAMING_SNAKE_CASE_ = PipelineTesterMixin.required_optional_params - {'latents'}
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
SCREAMING_SNAKE_CASE_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__lowerCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__lowerCamelCase = CLIPTextModel(_snake_case )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _lowerCamelCase ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
__lowerCamelCase = image / 2 + 0.5
if str(_snake_case ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(_snake_case )
else:
__lowerCamelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
__lowerCamelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = CycleDiffusionPipeline(**_snake_case )
__lowerCamelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__lowerCamelCase = self.get_dummy_inputs(_snake_case )
__lowerCamelCase = pipe(**_snake_case )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_snake_case , '''half''' ):
__lowerCamelCase = module.half()
__lowerCamelCase = CycleDiffusionPipeline(**_snake_case )
__lowerCamelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__lowerCamelCase = self.get_dummy_inputs(_snake_case )
__lowerCamelCase = pipe(**_snake_case )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def _lowerCamelCase ( self ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def _lowerCamelCase ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__lowerCamelCase = init_image.resize((5_12, 5_12) )
__lowerCamelCase = '''CompVis/stable-diffusion-v1-4'''
__lowerCamelCase = DDIMScheduler.from_pretrained(_snake_case , subfolder='''scheduler''' )
__lowerCamelCase = CycleDiffusionPipeline.from_pretrained(
_snake_case , scheduler=_snake_case , safety_checker=_snake_case , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
__lowerCamelCase = '''A black colored car'''
__lowerCamelCase = '''A blue colored car'''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=_snake_case , source_prompt=_snake_case , image=_snake_case , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_snake_case , output_type='''np''' , )
__lowerCamelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__lowerCamelCase = init_image.resize((5_12, 5_12) )
__lowerCamelCase = '''CompVis/stable-diffusion-v1-4'''
__lowerCamelCase = DDIMScheduler.from_pretrained(_snake_case , subfolder='''scheduler''' )
__lowerCamelCase = CycleDiffusionPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
__lowerCamelCase = '''A black colored car'''
__lowerCamelCase = '''A blue colored car'''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=_snake_case , source_prompt=_snake_case , image=_snake_case , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_snake_case , output_type='''np''' , )
__lowerCamelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 717 |
| 575 | 0 |
'''simple docstring'''
def __lowercase (discount_rate: float , cash_flows: list ) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
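# Added worked example: at a 10% discount rate, cash flows [-1000.0, 500.0, 600.0]
# discount to -1000/1.1**0 + 500/1.1**1 + 600/1.1**2 ~= -49.59 after rounding to
# two decimals, i.e. the project destroys value at that rate.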
if __name__ == "__main__":
import doctest
doctest.testmod()
| 150 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __lowercase () -> str:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowerCamelCase : List[Any] = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching, """os.path.join""", _lowercase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __lowercase () -> str:
"""simple docstring"""
assert _test_patching.open is open
__lowerCamelCase : List[str] = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, """open""", _lowercase ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def __lowercase () -> Dict:
"""simple docstring"""
# pandas.read_csv is not present in _test_patching
__lowerCamelCase : List[str] = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching, """pandas.read_csv""", _lowercase ):
pass
def __lowercase () -> Union[str, Any]:
"""simple docstring"""
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
    mock = """__test_patch_submodule_missing_builtin_mock__"""
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, """len""", None ) is None
    with patch_submodule(_test_patching, """len""", mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __lowercase () -> List[str]:
"""simple docstring"""
    mock = """__test_patch_submodule_start_and_stop_mock__"""
    patch = patch_submodule(_test_patching, """open""", mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __lowercase () -> List[Any]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = """__test_patch_submodule_successive_join__"""
    mock_dirname = """__test_patch_submodule_successive_dirname__"""
    mock_rename = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, """os.path.join""", _lowercase ):
with patch_submodule(_test_patching, """os.rename""", _lowercase ):
with patch_submodule(_test_patching, """os.path.dirname""", _lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, """os.rename""", _lowercase ):
with patch_submodule(_test_patching, """os.path.join""", _lowercase ):
with patch_submodule(_test_patching, """os.path.dirname""", _lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __lowercase () -> List[Any]:
"""simple docstring"""
    mock = """__test_patch_submodule_doesnt_exist_mock__"""
    with patch_submodule(_test_patching, """__module_that_doesn_exist__.__attribute_that_doesn_exist__""", mock ):
pass
with patch_submodule(_test_patching, """os.__attribute_that_doesn_exist__""", _lowercase ):
pass
| 150 | 1 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase__ : int = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = logging.get_verbosity()
UpperCamelCase__ : Union[str, Any] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
UpperCamelCase__ : Optional[int] = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(__SCREAMING_SNAKE_CASE ) as cl:
logger.warning(__SCREAMING_SNAKE_CASE )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(__SCREAMING_SNAKE_CASE ) as cl:
logger.warning(__SCREAMING_SNAKE_CASE )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(__SCREAMING_SNAKE_CASE ) as cl:
logger.warning(__SCREAMING_SNAKE_CASE )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(__SCREAMING_SNAKE_CASE )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase__ : Tuple = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
UpperCamelCase__ : Dict = os.getenv('''TRANSFORMERS_VERBOSITY''' , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = logging.log_levels[env_level_str]
UpperCamelCase__ : Any = logging.get_verbosity()
self.assertEqual(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase__ : Any = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase__ : List[Any] = logging.logging.getLogger()
with CaptureLogger(__SCREAMING_SNAKE_CASE ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase__ : Optional[Any] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
UpperCamelCase__ : Any = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(__SCREAMING_SNAKE_CASE ) as cl:
logger.warning_advice(__SCREAMING_SNAKE_CASE )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(__SCREAMING_SNAKE_CASE ) as cl:
logger.warning_advice(__SCREAMING_SNAKE_CASE )
self.assertEqual(cl.out , msg + '''\n''' )
def SCREAMING_SNAKE_CASE_ ( ):
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 462 |
class EditDistance :
    """simple docstring"""
    def __init__( self ) -> None:
        """simple docstring"""
        self.worda = ''''''
        self.wordb = ''''''
        self.dp = []
    def __min_dist_top_down_dp( self , m , n ) -> int:
        """simple docstring"""
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]
    def min_dist_top_down( self , worda , wordb ) -> int:
        """simple docstring"""
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )
    def min_dist_bottom_up( self , worda , wordb ) -> int:
        """simple docstring"""
        self.worda = worda
        self.wordb = wordb
        m = len(worda )
        n = len(wordb )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]
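# Added note: both methods implement the Levenshtein recurrence
#   dp[i][j] = dp[i-1][j-1]                                      if worda[i-1] == wordb[j-1]
#   dp[i][j] = 1 + min(dp[i][j-1], dp[i-1][j], dp[i-1][j-1])     otherwise
# top-down with memoization, bottom-up by filling the (m+1) x (n+1) table.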
if __name__ == "__main__":
    solver = EditDistance()
    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()
    Sa = input("Enter the first string: ").strip()
    Sb = input("Enter the second string: ").strip()
    print()
    print(F'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}''')
    print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}''')
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 462 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Pairwise squared Euclidean distances between the rows of a and the rows of b."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    """Map every pixel to the index of its nearest color cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
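
# --- Added usage sketch (illustrative; the palette below is hypothetical) ---
# Nearest-cluster assignment on a tiny 2x2 RGB image with a two-color palette:
# each pixel maps to the index of the closest cluster, here black (0) or white (1).
_demo_clusters = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float32)
_demo_image = np.array(
    [[[10, 10, 10], [250, 250, 250]], [[240, 240, 240], [5, 5, 5]]], dtype=np.float32
)
_demo_ids = color_quantize(_demo_image, _demo_clusters).reshape(2, 2)
assert _demo_ids.tolist() == [[0, 1], [1, 0]]
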
class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Image processor that resizes, normalizes to [-1, 1], and (optionally)
    color-quantizes images into cluster indices returned as ``input_ids``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image,
        size,
        resample=PILImageResampling.BILINEAR,
        data_format=None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image,
        data_format=None,
    ):
        # rescale to [0, 2] and shift to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    """Return True if the graph can be two-colored, i.e. is bipartite."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # color every connected component, starting each one with color 0
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # the graph is bipartite iff no edge joins two same-colored vertices
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
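
# --- Added example: a graph with an odd cycle can never be two-colored ---
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False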
'''simple docstring'''
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Recursively try adding current_number**power to the running sum."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """Count the ways needed_sum can be written as a sum of distinct natural
    numbers each raised to the given power."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
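
# --- Added usage sketch (the `solve` name follows the restored function above) ---
# 100 has exactly three representations as a sum of distinct squares:
# 10^2, 6^2 + 8^2, and 1^2 + 3^2 + 4^2 + 5^2 + 7^2.
assert solve(100, 2) == 3
assert solve(10, 2) == 1  # 10 = 1^2 + 3^2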
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
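
# --- Added sanity checks (illustrative; not part of the original script) ---
# The sigmoid is 0.5 at z = 0 and satisfies sigmoid(-z) == 1 - sigmoid(z).
assert sigmoid_function(np.array(0.0)) == 0.5
_z = np.array([-2.0, -1.0, 1.0, 2.0])
assert np.allclose(sigmoid_function(_z) + sigmoid_function(-_z), 1.0)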
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCAmelCase : int = get_tests_dir('fixtures')
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = mock.Mock()
__UpperCAmelCase : Any = 500
__UpperCAmelCase : Optional[Any] = {}
__UpperCAmelCase : Tuple = HTTPError
__UpperCAmelCase : Dict = {}
# Download this model to make sure it's in the cache.
__UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=UpperCamelCase ) as mock_head:
__UpperCAmelCase : str = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaises(UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
__UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(UpperCamelCase )
@is_staging_test
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCamelCase__ ( cls : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Dict = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def lowerCamelCase__ ( cls : Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained(UpperCamelCase )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
__UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCamelCase , repo_id="""test-image-processor""" , push_to_hub=UpperCamelCase , use_auth_token=self._token )
__UpperCAmelCase : Optional[Any] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase ) )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ViTImageProcessor.from_pretrained(UpperCamelCase )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
__UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCamelCase , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=UpperCamelCase , use_auth_token=self._token )
__UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
__UpperCAmelCase : Tuple = CustomImageProcessor.from_pretrained(UpperCamelCase )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
__UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
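
# --- Added usage sketch (illustrative; not part of the original module) ---
# `attribute_map` lets canonical names resolve to XLM-specific fields, so
# reading `hidden_size` transparently returns `emb_dim`:
if __name__ == "__main__":
    demo_config = XLMConfig(emb_dim=1024, n_heads=8)
    assert demo_config.hidden_size == 1024
    assert demo_config.num_attention_heads == 8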
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : str = (DPMSolverSinglestepScheduler,)
__UpperCamelCase : str = (("num_inference_steps", 25),)
def __UpperCAmelCase ( self :Optional[Any] , **SCREAMING_SNAKE_CASE :int ) -> str:
'''simple docstring'''
_a : Optional[Any] ={
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Optional[Any]=0 , **SCREAMING_SNAKE_CASE :str ) -> Any:
'''simple docstring'''
_a : Any =dict(self.forward_default_kwargs )
_a : Any =kwargs.pop("""num_inference_steps""" , SCREAMING_SNAKE_CASE )
_a : Tuple =self.dummy_sample
_a : Optional[Any] =0.1 * sample
_a : Dict =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_a : Tuple =self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
_a : Optional[Any] =scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
_a : str =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
_a : Dict =scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
_a : List[str] =dummy_past_residuals[: new_scheduler.config.solver_order]
_a , _a : str =sample, sample
for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
_a : Optional[int] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
_a : Union[str, Any] =new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Union[str, Any]=0 , **SCREAMING_SNAKE_CASE :str ) -> Union[str, Any]:
'''simple docstring'''
_a : List[str] =dict(self.forward_default_kwargs )
_a : Dict =kwargs.pop("""num_inference_steps""" , SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =self.dummy_sample
_a : int =0.1 * sample
_a : Optional[int] =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_a : Optional[int] =self.get_scheduler_config()
_a : str =scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
_a : Union[str, Any] =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
_a : Any =dummy_past_residuals[: new_scheduler.config.solver_order]
_a : Optional[int] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
_a : Tuple =new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :List[Any] ) -> Any:
'''simple docstring'''
if scheduler is None:
_a : int =self.scheduler_classes[0]
_a : int =self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
_a : int =scheduler_class(**SCREAMING_SNAKE_CASE )
_a : List[str] =self.scheduler_classes[0]
_a : Union[str, Any] =self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
_a : Optional[Any] =scheduler_class(**SCREAMING_SNAKE_CASE )
_a : List[str] =1_0
_a : Optional[Any] =self.dummy_model()
_a : int =self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
_a : str =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Dict =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __UpperCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
_a : int =DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_a : List[Any] =5_0
_a : Optional[Any] =self.dummy_model()
_a : List[Any] =self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
_a : Optional[int] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_574 ) < 1e-3
def __UpperCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_a : List[str] =DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_a : List[Any] =self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
_a : str =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
_a : Dict =DEISMultistepScheduler.from_config(scheduler.config )
_a : Union[str, Any] =DPMSolverMultistepScheduler.from_config(scheduler.config )
_a : str =UniPCMultistepScheduler.from_config(scheduler.config )
_a : Optional[Any] =DPMSolverSinglestepScheduler.from_config(scheduler.config )
_a : Dict =self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
_a : List[str] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , algorithm_type="""dpmsolver++""" , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , algorithm_type=SCREAMING_SNAKE_CASE , )
_a : List[Any] =self.full_loop(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , algorithm_type=SCREAMING_SNAKE_CASE , )
assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def __UpperCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE )
self.check_over_configs(variance_type="""learned_range""" )
def __UpperCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
_a : List[Any] =self.full_loop()
_a : Any =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def __UpperCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
_a : Dict =self.full_loop(use_karras_sigmas=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_248 ) < 1e-3
def __UpperCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
_a : Optional[int] =self.full_loop(prediction_type="""v_prediction""" )
_a : Optional[Any] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_453 ) < 1e-3
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
_a : List[Any] =self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=SCREAMING_SNAKE_CASE )
_a : Dict =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.0_649 ) < 1e-3
def __UpperCAmelCase ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_a : Dict =self.scheduler_classes[0]
_a : str =self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
_a : Optional[int] =scheduler_class(**SCREAMING_SNAKE_CASE )
_a : Optional[Any] =1_0
_a : Any =self.dummy_model()
_a : int =self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
_a : Tuple =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Dict =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
        assert sample.dtype == torch.float16
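
# --- Added usage sketch (illustrative; assumes a recent diffusers API) ---
# The config round-trip exercised by the tests above, in miniature: serialize
# a scheduler config to disk and rebuild an equivalent scheduler from it.
def _scheduler_config_roundtrip_demo() -> None:
    import tempfile

    from diffusers import DPMSolverSinglestepScheduler

    demo_scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
    with tempfile.TemporaryDirectory() as tmpdirname:
        demo_scheduler.save_config(tmpdirname)
        reloaded = DPMSolverSinglestepScheduler.from_pretrained(tmpdirname)
    assert reloaded.config.solver_order == demo_scheduler.config.solver_order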
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a Flax checkpoint file into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model, renaming and transposing as needed."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # Flax convolutions store kernels as (H, W, in, out); PyTorch wants (out, in, H, W)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # dense kernels are (in, out); PyTorch Linear stores (out, in)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
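
# --- Added sketch of the renaming/transpose rules above (illustrative) ---
# A Flax conv `kernel` is stored as (H, W, in, out) while PyTorch expects
# (out, in, H, W), hence the (3, 2, 0, 1) transpose; dense kernels are plain
# transposes of PyTorch's (out, in) Linear weight layout.
def _demo_kernel_rules() -> None:
    conv_kernel = np.zeros((3, 3, 8, 16))  # (H, W, in_channels, out_channels)
    assert np.transpose(conv_kernel, (3, 2, 0, 1)).shape == (16, 8, 3, 3)

    dense_kernel = np.zeros((128, 256))  # (in_features, out_features)
    assert dense_kernel.T.shape == (256, 128)


_demo_kernel_rules()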
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    """Find the minimal path sum from the left column to the right column of the
    matrix in `filename`, moving up, down, and right (Project Euler problem 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):  # move right
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):  # move down
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):  # move up
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"""{solution() = }""")
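
# --- Added worked example (matrix and answer from the classic 5x5 grid) ---
# The same three-pass relaxation as `solution`, run on an in-memory matrix;
# the best left-to-right path is 201 -> 96 -> 342 -> 234 -> 103 -> 18 = 994.
def _min_path_sum(matrix: list[list[int]]) -> int:
    rows, cols = len(matrix), len(matrix[0])
    dp = [[matrix[i][0]] + [0] * (cols - 1) for i in range(rows)]
    for j in range(1, cols):
        for i in range(rows):  # move right
            dp[i][j] = dp[i][j - 1] + matrix[i][j]
        for i in range(1, rows):  # move down
            dp[i][j] = min(dp[i][j], dp[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # move up
            dp[i][j] = min(dp[i][j], dp[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in dp)


_demo_matrix = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]
assert _min_path_sum(_demo_matrix) == 994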
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class lowercase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase__ = ["audio_values", "audio_mask"]
def __init__( self : List[str] , __lowerCamelCase : Tuple=2048 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Optional[int]=[16, 16] , __lowerCamelCase : List[Any]=128 , __lowerCamelCase : List[Any]=44100 , __lowerCamelCase : Any=86 , __lowerCamelCase : Dict=2048 , __lowerCamelCase : List[Any]=0.0 , **__lowerCamelCase : Optional[int] , ) -> Any:
'''simple docstring'''
super().__init__(
feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , **__lowerCamelCase , )
lowerCamelCase__ = spectrogram_length
lowerCamelCase__ = num_channels
lowerCamelCase__ = patch_size
lowerCamelCase__ = feature_size // self.patch_size[1]
lowerCamelCase__ = n_fft
lowerCamelCase__ = sampling_rate // hop_length_to_sampling_rate
lowerCamelCase__ = sampling_rate
lowerCamelCase__ = padding_value
lowerCamelCase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCamelCase , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=__lowerCamelCase , norm="slaney" , mel_scale="slaney" , ).T
def a__ ( self : str , __lowerCamelCase : np.array ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = spectrogram(
__lowerCamelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
lowerCamelCase__ = log_spec[:, :-1]
lowerCamelCase__ = log_spec - 20.0
lowerCamelCase__ = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Optional[int] , __lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = True , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , **__lowerCamelCase : Tuple , ) -> Tuple:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCamelCase__ = isinstance(__lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCamelCase__ = is_batched_numpy or (
isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
lowerCamelCase__ = np.asarray(__lowerCamelCase , dtype=np.floataa )
elif isinstance(__lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCamelCase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __lowerCamelCase ):
lowerCamelCase__ = [np.asarray(__lowerCamelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCamelCase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCamelCase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCamelCase__ = np.array(__lowerCamelCase ).astype(np.floataa )
# convert into correct format for padding
lowerCamelCase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCamelCase__ = np.ones([len(__lowerCamelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCamelCase__ = padded_audio_features * self.padding_value
for i in range(len(__lowerCamelCase ) ):
lowerCamelCase__ = audio_features[i]
lowerCamelCase__ = feature
# return as BatchFeature
if return_attention_mask:
lowerCamelCase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowerCamelCase__ = {"audio_values": padded_audio_features}
lowerCamelCase__ = BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
return encoded_inputs
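
# --- Added sketch (illustrative; mirrors the dB normalization used above) ---
# `spectrogram(..., log_mel="dB", db_range=80.0)` yields values in [-80, 0] dB;
# subtracting 20, dividing by 40, clipping to [-2, 0] and adding 1 maps quiet
# frames to -1 and full-scale frames to 0.5.
def _demo_log_mel_normalization() -> None:
    db = np.array([-80.0, -40.0, 0.0])
    normalized = np.clip((db - 20.0) / 40.0, -2.0, 0.0) + 1.0
    assert np.allclose(normalized, [-1.0, -0.5, 0.5])


_demo_log_mel_normalization()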
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky
    algorithm; each term of the series contributes roughly 14 digits."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    number_of_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, number_of_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
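
# --- Added sanity check (illustrative) ---
# The first digits should agree with math.pi; precision=10 keeps it instant.
from math import pi as _math_pi

assert pi(10) == str(_math_pi)[:10]  # "3.14159265"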
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = IFInpaintingPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self ):
return self._get_dummy_components()
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('''mps''' ):
__a : str = torch.manual_seed(_UpperCAmelCase )
else:
__a : List[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__a : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__a : Optional[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _lowerCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def _lowerCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _lowerCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _lowerCamelCase ( self ):
self._test_save_load_local()
def _lowerCamelCase ( self ):
self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''levit'''
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 8, 12] , _UpperCAmelCase=[4, 4, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = image_size
__a : List[Any] = num_channels
__a : Dict = kernel_size
__a : Optional[int] = stride
__a : Optional[int] = padding
__a : Dict = hidden_sizes
__a : int = num_attention_heads
__a : Optional[int] = depths
__a : str = key_dim
__a : Union[str, Any] = drop_path_rate
__a : Optional[Any] = patch_size
__a : Tuple = attention_ratio
__a : int = mlp_ratio
__a : int = initializer_range
__a : int = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowerCamelCase ( self ):
        return 1e-4
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def lowerCAmelCase ( UpperCAmelCase : Tuple, UpperCAmelCase : Optional[int] = 16 ) ->Any:
"""simple docstring"""
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__magic_name__ : Tuple = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(UpperCAmelCase : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ : Any = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=UpperCAmelCase, max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__magic_name__ : Tuple = datasets.map(
UpperCAmelCase, batched=UpperCAmelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ : str = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(UpperCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__magic_name__ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__magic_name__ : Any = 16
elif accelerator.mixed_precision != "no":
__magic_name__ : Union[str, Any] = 8
else:
__magic_name__ : Optional[Any] = None
return tokenizer.pad(
UpperCAmelCase, padding='''longest''', max_length=UpperCAmelCase, pad_to_multiple_of=UpperCAmelCase, return_tensors='''pt''', )
# Instantiate dataloaders.
__magic_name__ : List[Any] = DataLoader(
tokenized_datasets['''train'''], shuffle=UpperCAmelCase, collate_fn=UpperCAmelCase, batch_size=UpperCAmelCase, drop_last=UpperCAmelCase )
__magic_name__ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''], shuffle=UpperCAmelCase, collate_fn=UpperCAmelCase, batch_size=UpperCAmelCase, drop_last=(accelerator.mixed_precision == '''fp8'''), )
return train_dataloader, eval_dataloader
def lowerCAmelCase ( UpperCAmelCase : Any, UpperCAmelCase : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
__magic_name__ : Union[str, Any] = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ : int = config['''lr''']
__magic_name__ : Any = int(config['''num_epochs'''] )
__magic_name__ : List[str] = int(config['''seed'''] )
__magic_name__ : Optional[int] = int(config['''batch_size'''] )
__magic_name__ : Optional[Any] = evaluate.load('''glue''', '''mrpc''' )
# If the batch size is too big we use gradient accumulation
__magic_name__ : List[str] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__magic_name__ : List[str] = batch_size // MAX_GPU_BATCH_SIZE
__magic_name__ : str = MAX_GPU_BATCH_SIZE
set_seed(UpperCAmelCase )
__magic_name__ : int = get_dataloaders(UpperCAmelCase, UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__magic_name__ : Any = model.to(accelerator.device )
# Instantiate optimizer
__magic_name__ : int = AdamW(params=model.parameters(), lr=UpperCAmelCase )
# Instantiate scheduler
__magic_name__ : Tuple = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase, num_warmup_steps=100, num_training_steps=(len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ : Optional[Any] = accelerator.prepare(
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
# Now we train the model
for epoch in range(UpperCAmelCase ):
model.train()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__magic_name__ : Dict = model(**UpperCAmelCase )
__magic_name__ : Tuple = outputs.loss
__magic_name__ : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ : Dict = model(**UpperCAmelCase )
__magic_name__ : List[str] = outputs.logits.argmax(dim=-1 )
__magic_name__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCAmelCase, references=UpperCAmelCase, )
__magic_name__ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''', UpperCAmelCase )
def lowerCAmelCase ( ) ->Optional[Any]:
"""simple docstring"""
__magic_name__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''', type=UpperCAmelCase, default=UpperCAmelCase, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''', )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
__magic_name__ : Dict = parser.parse_args()
__magic_name__ : int = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCAmelCase, UpperCAmelCase )
if __name__ == "__main__":
main()
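
# --- Added sketch (illustrative; not part of the original script) ---
# The gradient-accumulation rule used in the training function above: when the
# requested batch size exceeds MAX_GPU_BATCH_SIZE, run smaller micro-batches
# and only step the optimizer every `gradient_accumulation_steps` iterations
# (the loss is divided by that count so the summed gradients match).
def _accumulation_schedule(batch_size: int, max_gpu_batch_size: int = 16) -> tuple[int, int]:
    gradient_accumulation_steps = 1
    if batch_size > max_gpu_batch_size:
        gradient_accumulation_steps = batch_size // max_gpu_batch_size
        batch_size = max_gpu_batch_size
    return batch_size, gradient_accumulation_steps


assert _accumulation_schedule(64) == (16, 4)
assert _accumulation_schedule(16) == (16, 1)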
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including `num` via the sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):  # step by p to strike its multiples
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))