| column | dtype | range |
|---|---|---|
| code | string | lengths 82–53.2k |
| code_codestyle | int64 | 0–721 |
| style_context | string | lengths 91–41.9k |
| style_context_codestyle | int64 | 0–699 |
| label | int64 | 0–1 |
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that collects methods marked with `mark`/`mark_multiple` into a `key_handler` dict.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """
        Reads one character and dispatches it to the registered handler, if any.
        """
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuilds the class with the KeyHandler metaclass."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
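# Usage sketch (illustrative, not part of the original module): a menu class can
# register per-key handlers via the decorators above. The `Menu` class and the
# specific KEYMAP entries used here are hypothetical examples.
#
# @register
# class Menu:
#     @mark(KEYMAP["up"])
#     def go_up(cls):
#         return "up"
#
#     @mark_multiple(KEYMAP["down"], KEYMAP["newline"])
#     def go_down_or_select(cls):
#         return "down"
#
# Menu.handle_input(Menu)  # reads one key press and calls the matching handler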
| 187 |
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
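# Usage sketch (illustrative, not part of the original __init__): the exported
# feature classes compose into a dataset schema. The field names below are
# hypothetical.
from datasets import ClassLabel, Features, Image, Value

schema = Features(
    {
        "img": Image(),
        "caption": Value("string"),
        "label": ClassLabel(names=["cat", "dog"]),
    }
)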
| 187 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
a_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
for attribute in key.split(""".""" ):
snake_case_ : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
snake_case_ : Any = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
snake_case_ : str = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
snake_case_ : Dict = value
elif weight_type == "weight_g":
snake_case_ : Optional[int] = value
elif weight_type == "weight_v":
snake_case_ : Optional[Any] = value
elif weight_type == "bias":
snake_case_ : Union[str, Any] = value
else:
snake_case_ : Dict = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : int = []
snake_case_ : List[Any] = fairseq_model.state_dict()
snake_case_ : Union[str, Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case_ : Any = None
for name, value in fairseq_dict.items():
snake_case_ : List[str] = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ : Dict = True
elif name.split(""".""" )[0] == "proj":
snake_case_ : Any = fairseq_model.proj
snake_case_ : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ : Tuple = True
if "*" in mapped_key:
snake_case_ : List[Any] = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2]
snake_case_ : List[Any] = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
snake_case_ : Optional[Any] = """weight_g"""
elif "weight_v" in name:
snake_case_ : Any = """weight_v"""
elif "bias" in name:
snake_case_ : Tuple = """bias"""
elif "weight" in name:
snake_case_ : int = """weight"""
else:
snake_case_ : Optional[Any] = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(f'Unused weights: {unused_weights}' )
return proj_weight
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Union[str, Any] = full_name.split("""conv_layers.""" )[-1]
snake_case_ : List[str] = name.split(""".""" )
snake_case_ : Dict = int(items[0] )
snake_case_ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
snake_case_ : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
snake_case_ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
snake_case_ : int = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
snake_case_ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : List[str] = emb.weight.shape
snake_case_ : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" ) as f:
snake_case_ : List[Any] = f.readlines()
snake_case_ : List[str] = [line.split(""" """ )[0] for line in lines]
snake_case_ : Optional[Any] = len(SCREAMING_SNAKE_CASE__ )
snake_case_ : str = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
"""simple docstring"""
snake_case_ : List[Any] = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = SpeechaTextaConfig.from_pretrained(
SCREAMING_SNAKE_CASE__ , vocab_size=SCREAMING_SNAKE_CASE__ , decoder_layers=SCREAMING_SNAKE_CASE__ , do_stable_layer_norm=SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
snake_case_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
snake_case_ : Optional[Any] = model[0].eval()
# set weights for wav2vec2 encoder
snake_case_ : Any = WavaVecaModel(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE__ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
snake_case_ : Any = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
snake_case_ : Dict = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = False
# add projection layer
snake_case_ : Dict = nn.Parameter(projection_layer.weight )
snake_case_ : List[Any] = nn.Parameter(projection_layer.bias )
snake_case_ : List[Any] = create_vocab_dict(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) , """w""" ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case_ : int = hf_wavavec.config.to_dict()
snake_case_ : Optional[Any] = tokenizer.pad_token_id
snake_case_ : Optional[Any] = tokenizer.bos_token_id
snake_case_ : Union[str, Any] = tokenizer.eos_token_id
snake_case_ : List[str] = """speech_to_text_2"""
snake_case_ : str = """wav2vec2"""
snake_case_ : Optional[Any] = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE__ )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
a_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
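# Illustrative invocation (not part of the original script): calling the
# converter directly from Python. All file paths below are hypothetical
# placeholders for a real fairseq checkpoint and its dictionary file.
#
# convert_wav2vec2_checkpoint(
#     "checkpoints/wav2vec2_seq2seq.pt",
#     "./wav2vec2-2-speech2text2",
#     "data/dict.ltr.txt",
#     encoder_config_path="facebook/wav2vec2-large-lv60",
#     decoder_config_path="facebook/s2t-small-mustc-en-fr-st",
#     vocab_size=10224,
#     num_decoder_layers=7,
# )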
| 719 |
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
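# Rendering sketch (illustrative): with the manim community edition installed,
# a scene can also be rendered programmatically instead of via the CLI.
# `tempconfig` and `Scene.render` are standard manim APIs; the quality setting
# and the `Stage3` class name are just the ones assumed above.
#
# from manim import tempconfig
#
# with tempconfig({"quality": "low_quality"}):
#     Stage3().render()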
| 48 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
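# Usage sketch (illustrative, not part of the original __init__): thanks to the
# _LazyModule registration above, heavy submodules are only imported on first
# attribute access, e.g.:
#
# from transformers import MobileBertConfig, MobileBertModel
#
# model = MobileBertModel(MobileBertConfig())  # triggers the lazy torch import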
| 14 |
def harmonic_series(n_term: str) -> list:
    """Generate the harmonic series as strings: ["1", "1/2", ..., "1/n"]."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
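# Example (illustrative): the terms are returned as strings, so
# harmonic_series("4") evaluates to ["1", "1/2", "1/3", "1/4"].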
| 14 | 1 |
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 718 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
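# Usage sketch (illustrative, not part of the original module): the config can
# be instantiated directly and any default overridden; the smaller layer count
# below is a hypothetical variant.
#
# from transformers import XLMRobertaConfig
#
# config = XLMRobertaConfig(num_hidden_layers=6)
# assert config.model_type == "xlm-roberta"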
| 646 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 437 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 437 | 1 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    # size of range of values in the list (ie, number of pigeonholes we need)
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(x) for x in a]))


if __name__ == "__main__":
    main()
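# Example (illustrative): the sort is in place and runs in O(n + range) time,
# where range = max(a) - min(a) + 1.
#
# data = [3, -1, 2, 3]
# pigeonhole_sort(data)
# assert data == [-1, 2, 3, 3]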
| 719 |
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_6, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_6))

        step_7 = MarkupText("Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_7.move_to([2, 2, 0])

        self.play(Write(step_7, run_time=3))

        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
| 369 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 444 |
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans a section of the table of content of the documentation by removing
    duplicates and sorting the entries alphabetically, keeping "Overview" first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
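# Behavior sketch (illustrative): clean_doc_toc deduplicates entries on their
# "local" key, sorts the rest alphabetically by title, and keeps the overview
# entry first. The toc entries below are hypothetical.
#
# toc = [
#     {"local": "api/ddpm", "title": "DDPM"},
#     {"local": "overview", "title": "Overview"},
#     {"local": "api/ddim", "title": "DDIM"},
#     {"local": "api/ddim", "title": "DDIM"},
# ]
# clean_doc_toc(toc)
# # -> overview first, then DDIM (deduplicated), then DDPM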
| 444 | 1 |
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
| 708 |
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 240 | 0 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EncodecFeatureExtractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[padding_mask[i][None, :] != self.feature_extractor.padding_value]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
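# Hedged usage sketch (checkpoint id and sampling rate are illustrative, not
# taken from this file): text goes to the T5 tokenizer, raw audio to the
# Encodec feature extractor, and both land in one batch.
#
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   batch = processor(text=["80s pop track"], audio=waveforms,
#                     sampling_rate=32_000, padding=True, return_tensors="pt")
#   audio = processor.batch_decode(audio=generated_values,
#                                  padding_mask=batch.get("padding_mask"))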
| 607 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
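# Hedged self-check (the demo list is ours): partitioning scans every element
# between start and end once, so even the best case costs at least n - 1
# comparisons, and the list must come out sorted in place.
demo = [3, 1, 2]
assert _in_place_quick_sort(demo, 0, len(demo) - 1) >= len(demo) - 1
assert demo == [1, 2, 3]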
| 678 | 0 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
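# Hedged companion sketch (the function name is ours): the same table-filling
# idea counts decompositions instead of materialising them, which avoids the
# exponential output size when only the number of constructions is needed.
def count_construct(target: str, word_bank: list[str]) -> int:
    table = [0] * (len(target) + 1)
    table[0] = 1  # the empty string has exactly one construction
    for i in range(len(target) + 1):
        if table[i]:
            for word in word_bank:
                if target[i : i + len(word)] == word:
                    table[i + len(word)] += table[i]
    return table[len(target)]
# e.g. count_construct('abcdef', ['ab', 'abc', 'cd', 'def', 'abcd', 'ef', 'c']) == 4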
| 711 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
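# Hedged usage sketch (the session setup is illustrative): read() either
# streams examples straight from the DataFrame or materialises an Arrow cache
# first, depending on the `streaming` flag.
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = SparkDatasetReader(df, streaming=False).read()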
| 371 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ViTFeatureExtractor"""]
lowerCamelCase__ = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
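    # Hedged note (the demo import is illustrative): _LazyModule defers the
    # heavy torch/tf/flax imports until an attribute is first resolved, e.g.
    #   from transformers.models.vit import ViTModel
    # only pays the torch import cost once ViTModel is actually requested.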
| 225 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
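# Hedged usage sketch: like any PretrainedConfig subclass, the config
# round-trips through a plain dict (to_dict/from_dict are inherited), which is
# how checkpoints persist it.
#
#   cfg = BertAbsConfig(enc_layers=4, dec_layers=4)
#   assert BertAbsConfig.from_dict(cfg.to_dict()).enc_layers == 4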
| 225 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True
@register_to_config
def __init__( self , A_ = 1000 , A_ = 0.00_01 , A_ = 0.02 , A_ = "linear" , A_ = None , A_ = "fixed_small" , A_ = True , A_ = "epsilon" , A_ = jnp.floataa , ):
'''simple docstring'''
UpperCamelCase : int = dtype
def __UpperCamelCase( self , A_ = None ):
'''simple docstring'''
if common is None:
UpperCamelCase : List[Any] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCamelCase : List[Any] = jnp.array(1.0 , dtype=self.dtype )
UpperCamelCase : int = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=A_ , init_noise_sigma=A_ , timesteps=A_ , )
def __UpperCamelCase( self , A_ , A_ , A_ = None ):
'''simple docstring'''
return sample
def __UpperCamelCase( self , A_ , A_ , A_ = () ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCamelCase : Union[str, Any] = (jnp.arange(0 , A_ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=A_ , timesteps=A_ , )
def __UpperCamelCase( self , A_ , A_ , A_=None , A_=None ):
'''simple docstring'''
UpperCamelCase : Optional[int] = state.common.alphas_cumprod[t]
UpperCamelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCamelCase : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCamelCase : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCamelCase : str = jnp.clip(A_ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCamelCase : Optional[Any] = jnp.log(jnp.clip(A_ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
UpperCamelCase : Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCamelCase : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCamelCase : Optional[int] = variance
UpperCamelCase : Dict = state.common.betas[t]
UpperCamelCase : Dict = (predicted_variance + 1) / 2
UpperCamelCase : List[Any] = frac * max_log + (1 - frac) * min_log
return variance
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ = None , A_ = True , ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = timestep
if key is None:
UpperCamelCase : str = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCamelCase : List[str] = jnp.split(A_ , sample.shape[1] , axis=1 )
else:
UpperCamelCase : int = None
# 1. compute alphas, betas
UpperCamelCase : Tuple = state.common.alphas_cumprod[t]
UpperCamelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCamelCase : Union[str, Any] = 1 - alpha_prod_t
UpperCamelCase : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCamelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCamelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCamelCase : Dict = jnp.clip(A_ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase : int = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCamelCase : Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase : Any = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCamelCase : Dict = jax.random.split(A_ , num=1 )
UpperCamelCase : List[Any] = jax.random.normal(A_ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(A_ , A_ , predicted_variance=A_ ) ** 0.5) * noise
UpperCamelCase : Tuple = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCamelCase : str = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=A_ , state=A_ )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
return add_noise_common(state.common , A_ , A_ , A_ )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
return get_velocity_common(state.common , A_ , A_ , A_ )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
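# Hedged numeric note (variable names below are ours): for t > 0 the two
# step() coefficients realise the DDPM posterior mean of Eq. (7),
#     mu_t = coeff_x0 * x_0 + coeff_xt * x_t
# with
#     coeff_x0 = sqrt(alphas_cumprod[t - 1]) * betas[t] / (1 - alphas_cumprod[t])
#     coeff_xt = sqrt(alphas[t]) * (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t])
# Under the default linear beta schedule both stay finite and non-negative.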
| 706 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(model_type, generator_name_or_path: str, question_encoder_name_or_path: str, dest_dir: Path, config_name_or_path: str = None, generator_tokenizer_name_or_path: str = None, question_encoder_tokenizer_name_or_path: str = None) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
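    # Hedged invocation sketch (script name and checkpoints are illustrative):
    #
    #   python consolidate_rag_checkpoint.py \
    #       --model_type rag_sequence \
    #       --dest ./rag-consolidated \
    #       --generator_name_or_path facebook/bart-large-cnn \
    #       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base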
| 38 | 0 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase = cn.convert_to_negative(a_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) ->int:
'''simple docstring'''
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def lowerCAmelCase__ ( ) ->int:
'''simple docstring'''
_UpperCamelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_UpperCamelCase = canny.canny(a_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) ->List[str]:
'''simple docstring'''
assert gg.gaussian_filter(a_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) ->int:
'''simple docstring'''
_UpperCamelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_UpperCamelCase = conv.img_convolve(a_ , a_ ).astype(a_ )
assert res.any()
def lowerCAmelCase__ ( ) ->int:
'''simple docstring'''
assert med.median_filter(a_ , 3 ).any()
def lowerCAmelCase__ ( ) ->str:
'''simple docstring'''
_UpperCamelCase = sob.sobel_filter(a_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = sp.make_sepia(a_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( a__ = "digital_image_processing/image_data/lena_small.jpg" ) ->int:
'''simple docstring'''
_UpperCamelCase = bs.Burkes(imread(a_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( a__ = "digital_image_processing/image_data/lena_small.jpg" , ) ->Tuple:
'''simple docstring'''
_UpperCamelCase = rs.NearestNeighbour(imread(a_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) ->Dict:
'''simple docstring'''
_UpperCamelCase = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
_UpperCamelCase = imread(a_ , 0 )
# Test for get_neighbors_pixel function() return not None
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = image[x_coordinate][y_coordinate]
_UpperCamelCase = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_UpperCamelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_UpperCamelCase = lbp.local_binary_value(a_ , a_ , a_ )
assert lbp_image.any()
| 547 | import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
A__ = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
A__ = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
def __init__( self : List[str] , __snake_case : int = 256 , __snake_case : int = 256 , __snake_case : float = 0.1 , __snake_case : bool = False , __snake_case : Optional[Dict] = None , __snake_case : Optional[Dict] = None , __snake_case : float = 0.0_2 , __snake_case : float = 1.0 , __snake_case : float = 1.0 , __snake_case : float = 1.0 , __snake_case : float = 2_0.0 , __snake_case : Optional[bool] = None , **__snake_case : Any , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowerCamelCase :Optional[int] = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(__snake_case , __snake_case ):
lowerCamelCase :int = backbone_config.pop('''model_type''' )
lowerCamelCase :Tuple = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase :Union[str, Any] = config_class.from_dict(__snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowerCamelCase :Optional[int] = DetrConfig()
else:
# verify that the decoder is supported
lowerCamelCase :int = (
decoder_config.pop('''model_type''' ) if isinstance(__snake_case , __snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
if isinstance(__snake_case , __snake_case ):
lowerCamelCase :List[Any] = CONFIG_MAPPING[decoder_type]
lowerCamelCase :Optional[Any] = config_class.from_dict(__snake_case )
lowerCamelCase :Tuple = backbone_config
lowerCamelCase :int = decoder_config
# main feature dimension for the model
lowerCamelCase :Union[str, Any] = fpn_feature_size
lowerCamelCase :List[Any] = mask_feature_size
# initializer
lowerCamelCase :Any = init_std
lowerCamelCase :List[str] = init_xavier_std
# Hungarian matcher && loss
lowerCamelCase :List[str] = cross_entropy_weight
lowerCamelCase :Union[str, Any] = dice_weight
lowerCamelCase :Dict = mask_weight
lowerCamelCase :Optional[Any] = use_auxiliary_loss
lowerCamelCase :Dict = no_object_weight
lowerCamelCase :List[Any] = output_auxiliary_logits
lowerCamelCase :Any = self.decoder_config.encoder_attention_heads
lowerCamelCase :List[str] = self.decoder_config.num_hidden_layers
super().__init__(**__snake_case )
@classmethod
def snake_case ( cls : Tuple , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Any ):
return cls(
backbone_config=__snake_case , decoder_config=__snake_case , **__snake_case , )
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = copy.deepcopy(self.__dict__ )
lowerCamelCase :Dict = self.backbone_config.to_dict()
lowerCamelCase :int = self.decoder_config.to_dict()
lowerCamelCase :List[str] = self.__class__.model_type
return output
| 166 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[int], iterations: int) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')

    return is_diagonally_dominant
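# Hedged usage sketch: a strictly diagonally dominant 2x2 system; the exact
# solution of 4x + y = 1 and x + 3y = 2 is (1/11, 7/11) ~ (0.0909, 0.6364).
#
#   coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#   constant = np.array([[1.0], [2.0]])
#   jacobi_iteration_method(coefficient, constant, [0.5, 0.5], 50)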
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod() | 665 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
@property
def _UpperCamelCase( self : str ):
a__ : Any = 4
a__ : Union[str, Any] = 3
a__ : Dict = (32, 32)
a__ : int = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase__ )
return {"sample": image}
@property
def _UpperCamelCase( self : List[Any] ):
return (3, 32, 32)
@property
def _UpperCamelCase( self : Dict ):
return (3, 32, 32)
def _UpperCamelCase( self : List[str] ):
a__ : str = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
a__ : List[str] = self.dummy_input
return init_dict, inputs_dict
def _UpperCamelCase( self : Tuple ):
pass
def _UpperCamelCase( self : Any ):
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _UpperCamelCase( self : List[str] ):
# enable deterministic behavior for gradient checkpointing
a__, a__ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
a__ : Dict = self.model_class(**lowerCamelCase__ )
model.to(lowerCamelCase__ )
assert not model.is_gradient_checkpointing and model.training
a__ : str = model(**lowerCamelCase__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
a__ : Union[str, Any] = torch.randn_like(lowerCamelCase__ )
a__ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
a__ : Any = self.model_class(**lowerCamelCase__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
a__ : int = model_a(**lowerCamelCase__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
a__ : Any = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
a__ : str = dict(model.named_parameters() )
a__ : List[str] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _UpperCamelCase( self : Dict ):
a__, a__ : Union[str, Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowerCamelCase__ )
a__ : Tuple = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _UpperCamelCase( self : Optional[int] ):
a__ : Optional[int] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
a__ : List[str] = model.to(lowerCamelCase__ )
model.eval()
if torch_device == "mps":
a__ : int = torch.manual_seed(0 )
else:
a__ : List[str] = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
a__ : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
a__ : Optional[Any] = image.to(lowerCamelCase__ )
with torch.no_grad():
a__ : str = model(lowerCamelCase__ , sample_posterior=lowerCamelCase__ , generator=lowerCamelCase__ ).sample
a__ : Dict = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
a__ : Tuple = torch.tensor(
[
-4.00_78E-01,
-3.83_23E-04,
-1.26_81E-01,
-1.14_62E-01,
2.00_95E-01,
1.08_93E-01,
-8.82_47E-02,
-3.03_61E-01,
-9.86_44E-03,
] )
elif torch_device == "cpu":
a__ : Tuple = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
a__ : Union[str, Any] = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(lowerCamelCase__ , lowerCamelCase__ , rtol=1E-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] ):
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase__ ) for s in shape] )}.npy'''
def _UpperCamelCase( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Dict=0 , lowerCamelCase__ : Optional[int]=(4, 3, 512, 512) , lowerCamelCase__ : Tuple=False ):
a__ : Dict = torch.floataa if fpaa else torch.floataa
a__ : str = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase__ , lowerCamelCase__ ) ) ).to(lowerCamelCase__ ).to(lowerCamelCase__ )
return image
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[Any]="CompVis/stable-diffusion-v1-4" , lowerCamelCase__ : Optional[Any]=False ):
a__ : Tuple = "fp16" if fpaa else None
a__ : Optional[Any] = torch.floataa if fpaa else torch.floataa
a__ : Optional[int] = AutoencoderKL.from_pretrained(
lowerCamelCase__ , subfolder="vae" , torch_dtype=lowerCamelCase__ , revision=lowerCamelCase__ , )
model.to(lowerCamelCase__ ).eval()
return model
def _UpperCamelCase( self : Any , lowerCamelCase__ : int=0 ):
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase__ )
return torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ):
a__ : List[str] = self.get_sd_vae_model()
a__ : Dict = self.get_sd_image(lowerCamelCase__ )
a__ : List[str] = self.get_generator(lowerCamelCase__ )
with torch.no_grad():
a__ : str = model(lowerCamelCase__ , generator=lowerCamelCase__ , sample_posterior=lowerCamelCase__ ).sample
assert sample.shape == image.shape
a__ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
a__ : Dict = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] ):
a__ : Union[str, Any] = self.get_sd_vae_model(fpaa=lowerCamelCase__ )
a__ : List[str] = self.get_sd_image(lowerCamelCase__ , fpaa=lowerCamelCase__ )
a__ : Tuple = self.get_generator(lowerCamelCase__ )
with torch.no_grad():
a__ : List[str] = model(lowerCamelCase__ , generator=lowerCamelCase__ , sample_posterior=lowerCamelCase__ ).sample
assert sample.shape == image.shape
a__ : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
a__ : Dict = torch.tensor(lowerCamelCase__ )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ):
a__ : List[Any] = self.get_sd_vae_model()
a__ : str = self.get_sd_image(lowerCamelCase__ )
with torch.no_grad():
a__ : Optional[int] = model(lowerCamelCase__ ).sample
assert sample.shape == image.shape
a__ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
a__ : List[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] ):
a__ : Any = self.get_sd_vae_model()
a__ : Optional[Any] = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
a__ : Dict = model.decode(lowerCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
a__ : Tuple = sample[-1, -2:, :2, -2:].flatten().cpu()
a__ : int = torch.tensor(lowerCamelCase__ )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple ):
a__ : str = self.get_sd_vae_model(fpaa=lowerCamelCase__ )
a__ : Any = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase__ )
with torch.no_grad():
a__ : List[str] = model.decode(lowerCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
a__ : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
a__ : Tuple = torch.tensor(lowerCamelCase__ )
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _UpperCamelCase( self : str , lowerCamelCase__ : List[str] ):
a__ : int = self.get_sd_vae_model(fpaa=lowerCamelCase__ )
a__ : Any = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase__ )
with torch.no_grad():
a__ : Union[str, Any] = model.decode(lowerCamelCase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
a__ : List[str] = model.decode(lowerCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Optional[Any] ):
a__ : Any = self.get_sd_vae_model()
a__ : int = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
a__ : Union[str, Any] = model.decode(lowerCamelCase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
a__ : str = model.decode(lowerCamelCase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int] ):
a__ : Union[str, Any] = self.get_sd_vae_model()
a__ : Optional[Any] = self.get_sd_image(lowerCamelCase__ )
a__ : Any = self.get_generator(lowerCamelCase__ )
with torch.no_grad():
a__ : Optional[Any] = model.encode(lowerCamelCase__ ).latent_dist
a__ : Any = dist.sample(generator=lowerCamelCase__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
a__ : Any = sample[0, -1, -3:, -3:].flatten().cpu()
a__ : Dict = torch.tensor(lowerCamelCase__ )
a__ : Optional[int] = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=lowerCamelCase__ )
| 37 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
UpperCamelCase : Tuple = """will be""" if year > datetime.now().year else """was"""
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 37 | 1 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
_lowercase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
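# Hedged counter-example: an odd cycle can never be two-coloured, so a
# triangle is expected to fail the check, while the graph above (an even
# cycle plus an isolated vertex) passes.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False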
| 431 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.'
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
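# Hedged usage sketch (feature spec and path are illustrative): build a small
# throwaway Arrow dataset and time a full pass over it with the decorator.
#
#   feats = datasets.Features({"text": datasets.Value("string")})
#   ds = generate_example_dataset("/tmp/bench.arrow", feats, num_examples=1_000)
#   time_read = get_duration(lambda dset: list(dset))
#   print(time_read(ds))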
| 431 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # score is the number of characters already in the right position
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 699 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
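# Hedged usage sketch (shapes are illustrative): inputs are expected in
# [-1, 1]; anything narrower than 256 px passes through untouched by design.
#
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.randn(1, 3, 512, 512).clamp(-1.0, 1.0)
#   assert watermarker.apply_watermark(images).shape == images.shape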
| 699 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    '''simple docstring'''

    def __init__( self , initial_capacity: int = 6 ) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity )

    def create_linked_list( self , initial_capacity: int ) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring: last node points back at the front
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty( self ) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first( self ) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue( self , data: Any ) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue( self ) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation( self ) -> None:
        if self.is_empty():
            raise Exception('Empty Queue' )

    def check_is_full( self ) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue' )


class Node:
    '''simple docstring'''

    def __init__( self ) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
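# A brief usage sketch (added for illustration, using the method names
# reconstructed above): the queue holds at most `initial_capacity` items and
# reuses its ring of nodes instead of allocating on every enqueue.
#
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   assert queue.first() == "a"
#   assert queue.dequeue() == "a"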
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        raise TypeError('Parameter number must be int' )
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0' )
    # Converts number to a string to iterate on its digits and adds their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )


def solution(chain_length: int = 6_0 , number_limit: int = 1_0_0_0_0_0_0 ) -> int:
    '''simple docstring'''
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('Parameters chain_length and number_limit must be int' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item,
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
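# Worked example (added for illustration): 145 is a fixed point of the digit
# factorial map, since 1! + 4! + 5! = 1 + 24 + 120 = 145, so its chain has
# length 1; 169 -> 363601 -> 1454 -> 169 gives a chain of length 3.
#
#   assert digit_factorial_sum(145) == 145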
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
| 123 | 0 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input( self , sizes=(3_2, 3_2) ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}

    @property
    def input_shape( self ):
        """simple docstring"""
        return (3, 3_2, 3_2)

    @property
    def output_shape( self ):
        """simple docstring"""
        return (3, 3_2, 3_2)

    def prepare_init_args_and_inputs_for_common( self ):
        """simple docstring"""
        init_dict = {
            "block_out_channels": [3_2, 6_4],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature( self ):
        """simple docstring"""
        pass

    def test_training( self ):
        """simple docstring"""
        pass

    def test_from_pretrained_hub( self ):
        """simple docstring"""
        model , loading_info = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained( self ):
        """simple docstring"""
        model = VQModel.from_pretrained("fusing/vqgan-dummy" )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1E-3 ) )
| 566 |
'''simple docstring'''
from manim import *
class lowerCamelCase__( snake_case_ ):
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = Rectangle(height=0.5 , width=0.5 )
__lowercase = Rectangle(height=0.25 , width=0.25 )
__lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = Text("""CPU""" , font_size=2_4 )
__lowercase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
__lowercase = [mem.copy() for i in range(4 )]
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = Text("""GPU""" , font_size=2_4 )
__lowercase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = Text("""Model""" , font_size=2_4 )
__lowercase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
__lowercase = []
__lowercase = []
__lowercase = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
__lowercase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = Text("""Loaded Checkpoint""" , font_size=2_4 )
__lowercase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__UpperCAmelCase )
__lowercase = []
__lowercase = []
for i, rect in enumerate(__UpperCAmelCase ):
__lowercase = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
ckpt_arr.append(__UpperCAmelCase )
__lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
__lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
__lowercase = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = Text("""Disk""" , font_size=2_4 )
__lowercase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
__lowercase = []
for i, rect in enumerate(__UpperCAmelCase ):
__lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(FadeOut(__UpperCAmelCase ) )
__lowercase = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
self.wait()
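# Rendering note (added for illustration; the class name above is an obfuscated
# placeholder from this dataset row): Manim scenes like this one are rendered
# from the command line, e.g. `manim -pql <this_file>.py <SceneClassName>`.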
| 566 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = KandinskyInpaintPipeline
lowerCAmelCase__ : int = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
lowerCAmelCase__ : Dict = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
lowerCAmelCase__ : Tuple = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCAmelCase__ : Optional[Any] = False
@property
def _UpperCAmelCase ( self: Tuple ) -> Tuple:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase ( self: str ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def _UpperCAmelCase ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def _UpperCAmelCase ( self: str ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _UpperCAmelCase ( self: Dict ) -> List[str]:
'''simple docstring'''
return 100
@property
def _UpperCAmelCase ( self: List[Any] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _UpperCAmelCase ( self: Union[str, Any] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
__UpperCAmelCase = MultilingualCLIP(__snake_case )
__UpperCAmelCase = text_encoder.eval()
return text_encoder
@property
def _UpperCAmelCase ( self: str ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__UpperCAmelCase = UNetaDConditionModel(**__snake_case )
return model
@property
def _UpperCAmelCase ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCAmelCase ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCAmelCase ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = self.dummy_text_encoder
__UpperCAmelCase = self.dummy_tokenizer
__UpperCAmelCase = self.dummy_unet
__UpperCAmelCase = self.dummy_movq
__UpperCAmelCase = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="epsilon" , thresholding=__snake_case , )
__UpperCAmelCase = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _UpperCAmelCase ( self: List[Any] , __lowerCAmelCase: List[Any] , __lowerCAmelCase: str=0 ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
__UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
__UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase = Image.fromarray(np.uinta(__snake_case ) ).convert("RGB" ).resize((256, 256) )
# create mask
__UpperCAmelCase = np.ones((64, 64) , dtype=np.floataa )
__UpperCAmelCase = 0
if str(__snake_case ).startswith("mps" ):
__UpperCAmelCase = torch.manual_seed(__snake_case )
else:
__UpperCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__UpperCAmelCase = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def _UpperCAmelCase ( self: str ) -> int:
'''simple docstring'''
__UpperCAmelCase = '''cpu'''
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**__snake_case )
__UpperCAmelCase = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__UpperCAmelCase = pipe(**self.get_dummy_inputs(__snake_case ) )
__UpperCAmelCase = output.images
__UpperCAmelCase = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__UpperCAmelCase = image[0, -3:, -3:, -1]
__UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _UpperCAmelCase ( self: Dict ) -> List[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
__UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__UpperCAmelCase = np.ones((768, 768) , dtype=np.floataa )
__UpperCAmelCase = 0
__UpperCAmelCase = '''a hat'''
__UpperCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__UpperCAmelCase = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
__UpperCAmelCase = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCAmelCase = pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__UpperCAmelCase = pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
__UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
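# A hedged sketch (added for illustration) of the two-stage Kandinsky pattern
# the slow test above exercises: the prior pipeline maps text to image
# embeddings, which the inpaint pipeline then consumes together with the
# image and mask.
#
#   image_emb, zero_image_emb = pipe_prior(prompt, generator=generator).to_tuple()
#   out = pipeline(prompt, image=init_image, mask_image=mask,
#                  image_embeds=image_emb, negative_image_embeds=zero_image_emb)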
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
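# Note (added for illustration): with this layout `import transformers` stays
# cheap; a heavy submodule such as `modeling_squeezebert` is only imported the
# first time one of its symbols is accessed, e.g.
#
#   from transformers import SqueezeBertModel  # triggers the lazy import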
| 286 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest( unittest.TestCase ):
    """simple docstring"""

    def setUp( self ):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer( self , **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )

    def get_feature_extractor( self , **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_save_load_pretrained_additional_features( self ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_feature_extractor( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors="np" )
        input_processor = processor(audios=raw_speech , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def test_tokenizer( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_tokenizer_decode( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
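# A hedged usage sketch (added for illustration): the processor routes `text=`
# to the tokenizer and `audios=` to the feature extractor, so both modalities
# can be prepared in one call.
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=floats_list((1, 1000)),
#                      return_tensors="np")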
| 104 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    """simple docstring"""

    @register_to_config
    def __init__( self , max_length: int , vocab_size: int , d_model: int , dropout_rate: float , num_layers: int , num_heads: int , d_kv: int , d_ff: int , feed_forward_proj: str , is_decoder: bool = False , ) -> None:
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )

    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask so masked positions are suppressed
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
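# Illustration (added; not in the original file) of the extended mask used
# above: ModuleUtilsMixin.get_extended_attention_mask turns a (batch, seq)
# 0/1 padding mask into an additive (batch, 1, 1, seq) mask, with a large
# negative value at masked positions so they vanish under softmax.
#
#   mask = torch.tensor([[1, 1, 0]])                      # last token is padding
#   ext = encoder.get_extended_attention_mask(mask, (1, 3))
#   # ext.shape == (1, 1, 1, 3); ext[..., -1] is a large negative number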
| 104 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("""T""")


class DisjointSetTreeNode(Generic[T] ):
    '''simple docstring'''

    def __init__( self , data: T ) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T] ):
    '''simple docstring'''

    def __init__( self ) -> None:
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set( self , data: T ) -> None:
        self.map[data] = DisjointSetTreeNode(data )

    def find_set( self , data: T ) -> DisjointSetTreeNode[T]:
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            # path compression
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent

    def link( self , node1: DisjointSetTreeNode[T] , node2: DisjointSetTreeNode[T] ) -> None:
        # union by rank
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union( self , data1: T , data2: T ) -> None:
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )


class GraphUndirectedWeighted(Generic[T] ):
    '''simple docstring'''

    def __init__( self ) -> None:
        self.connections: dict[T, dict[T, int]] = {}

    def add_node( self , node: T ) -> None:
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge( self , node1: T , node2: T , weight: int ) -> None:
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal( self ) -> GraphUndirectedWeighted[T]:
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
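# A short usage sketch (added for illustration): build a weighted graph and
# extract its minimum spanning tree with Kruskal's algorithm.
#
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 10)
#   mst = g.kruskal()
#   # mst keeps edges (1, 2) and (2, 3); the weight-10 edge is dropped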
| 702 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = """T5Config"""
class lowerCamelCase__ ( _A ):
'''simple docstring'''
A__ = '''mt5'''
A__ = MTaConfig
class lowerCamelCase__ ( _A ):
'''simple docstring'''
A__ = '''mt5'''
A__ = MTaConfig
class lowerCamelCase__ ( _A ):
'''simple docstring'''
A__ = '''mt5'''
A__ = MTaConfig
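# A hedged usage sketch (added for illustration): these subclasses only swap
# the config class, so any T5 loading pattern applies unchanged, e.g.
#
#   model = TFMTaForConditionalGeneration.from_pretrained("google/mt5-small")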
| 211 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str] ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash


def get_tokens(code: str ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__( self , *,
        duplication_jaccard_threshold: float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )

    def add( self , code_key: Tuple , min_hash: MinHash ) -> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )

    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters

    def save( self , filepath ) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , 'w' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash(element ) -> Tuple:
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset] ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset] , jaccard_threshold: float ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str , code2: str ) -> float:
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
_shared_dataset = None
def _find_cluster_extremes_shared(cluster , jaccard_threshold ) -> List:
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes(cluster_list , dataset , jaccard_threshold ) -> List:
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset] , jaccard_threshold: float = 0.85 ):
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['base_index']]['copies']
    print(f'Original dataset size: {len(dataset )}' )
    print(f'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(f'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(f'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(f'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
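# A hedged usage sketch (added for illustration): given a `datasets.Dataset`
# with "content", "repo_name" and "path" columns, near-duplicates above the
# Jaccard threshold are collapsed to one representative ("extreme") per cluster.
#
#   ds_filtered, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)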
| 345 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class A :
def __init__( self , snake_case_ , snake_case_=2 , snake_case_=True , snake_case_=False , snake_case_=1_0 , snake_case_=3 , snake_case_=3_2 * 4 , snake_case_=3_2 * 6 , snake_case_=4 , snake_case_=3_2 , ) -> Union[str, Any]:
_a = parent
_a = batch_size
_a = is_training
_a = use_auxiliary_loss
_a = num_queries
_a = num_channels
_a = min_size
_a = max_size
_a = num_labels
_a = mask_feature_size
def __lowerCAmelCase ( self ) -> Any:
_a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case_ )
_a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
_a = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
).float()
_a = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
_a = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __lowerCAmelCase ( self ) -> List[str]:
_a , _a , _a , _a , _a = self.prepare_config_and_inputs()
_a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> List[Any]:
_a = output.encoder_hidden_states
_a = output.pixel_decoder_hidden_states
_a = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=False ) -> Dict:
with torch.no_grad():
_a = MaskFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
_a = model(snake_case_ , output_hidden_states=snake_case_ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
_a = MaskFormerForInstanceSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
def comm_check_on_output(snake_case_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
_a = model(snake_case_ )
comm_check_on_output(snake_case_ )
_a = model(
pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
comm_check_on_output(snake_case_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__UpperCAmelCase : Dict = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[Any] = False
def __lowerCAmelCase ( self ) -> str:
_a = MaskFormerModelTester(self )
_a = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Any:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def __lowerCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def __lowerCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def __lowerCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def __lowerCAmelCase ( self ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __lowerCAmelCase ( self ) -> Optional[int]:
pass
def __lowerCAmelCase ( self ) -> int:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(snake_case_ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def __lowerCAmelCase ( self ) -> List[str]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_a = MaskFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def __lowerCAmelCase ( self ) -> int:
_a = (self.model_tester.min_size,) * 2
_a = {
"pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ),
"mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ),
"class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(),
}
_a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
_a = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(snake_case_ ).to(snake_case_ )
_a = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def __lowerCAmelCase ( self ) -> List[str]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_a = self.all_model_classes[1]
_a , _a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
_a = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_a = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def __lowerCAmelCase ( self ) -> str:
# only MaskFormerForInstanceSegmentation has the loss
_a = self.all_model_classes[1]
_a , _a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
_a = True
_a = True
_a = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_a = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
_a = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_a = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__snake_case : Optional[Any] = 1E-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@slow
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> str:
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def __lowerCAmelCase ( self ) -> Any:
_a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_a = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_a = model(**snake_case_ )
_a = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_a = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_a = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(snake_case_ )
.eval()
)
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_a = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_a = model(**snake_case_ )
# masks_queries_logits
_a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_a = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
_a = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_a = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def __lowerCAmelCase ( self ) -> List[str]:
_a = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(snake_case_ )
.eval()
)
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_a = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_a = model(**snake_case_ )
# masks_queries_logits
_a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_a = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
_a = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_a = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(snake_case_ )
.eval()
)
_a = self.default_image_processor
_a = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
_a = inputs["pixel_values"].to(snake_case_ )
_a = [el.to(snake_case_ ) for el in inputs["mask_labels"]]
_a = [el.to(snake_case_ ) for el in inputs["class_labels"]]
with torch.no_grad():
_a = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
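# A hedged post-processing sketch (added for illustration): outputs from
# MaskFormerForInstanceSegmentation are usually turned into dense maps via the
# image processor, e.g.
#
#   seg = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[(384, 384)]
#   )[0]   # (H, W) tensor of class ids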
| 131 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
__UpperCamelCase = {
'yjernite/retribert-base-uncased': 5_1_2,
}
__UpperCamelCase = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
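# Layout note (added for illustration): the helpers above produce the standard
# BERT-style encoding, `[CLS] A [SEP]` for single sequences and
# `[CLS] A [SEP] B [SEP]` for pairs, with token_type_ids 0 for the first
# segment (including both special tokens) and 1 for the second.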
| 703 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class TatoebaConversionTester( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def resolver( self ):
        """simple docstring"""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )

    @slow
    def test_resolver( self ):
        """simple docstring"""
        self.resolver.convert_models(["heb-eng"] )

    @slow
    def test_model_card( self ):
        """simple docstring"""
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 185 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 18 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 503 | 0 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)

        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)

        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )

        # Sync the backend pre-tokenizer with the requested add_prefix_space setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
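
# Minimal usage sketch, assuming the "bigscience/bloom-560m" tokenizer files
# listed above can be fetched; no model weights are needed to tokenize:
if __name__ == "__main__":
    tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    ids = tok("Hello world")["input_ids"]
    print(ids, tok.decode(ids))  # round-trips back to "Hello world"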
| 67 | 1 |
def binary_recursive(decimal: int) -> str:
    """Convert a non-negative integer to its binary digits, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input, then prefix the result with the sign and '0b'."""
    number = str(number).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    if not number.isnumeric():
        raise ValueError('Input value is not an integer')
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
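
# Worked examples, assuming the two functions above:
# binary_recursive(7) -> "111" (i.e. 0b111)
# main("-17") -> "-0b10001"
# main("0") -> "0b0"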
| 381 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 381 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings with the attention mask, then project.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
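
# Minimal usage sketch; the checkpoint name below is an assumption (any M-CLIP
# checkpoint matching this XLM-R + linear-head architecture works the same way):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    ckpt = "M-CLIP/XLM-Roberta-Large-Vit-B-32"
    tok = AutoTokenizer.from_pretrained(ckpt)
    model = MultilingualCLIP.from_pretrained(ckpt)
    batch = tok(["en katt", "a cat"], padding=True, return_tensors="pt")
    projected, token_embs = model(batch["input_ids"], batch["attention_mask"])
    print(projected.shape)  # (2, config.numDims)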
| 705 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # TF conv kernels are HWIO; PyTorch expects OIHW.
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41 | 0 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Walk the continued-fraction convergents of e; only the numerators matter.
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f'''{solution() = }''')
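
# Sanity check from the Project Euler 65 statement: the 10th convergent of e is
# 1457/536, so solution(10) == sum_digits(1457) == 1 + 4 + 5 + 7 == 17.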
| 13 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbour 2x upsample, then a 3x3 conv.
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method='nearest', )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype, )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Inject the timestep embedding as a per-channel bias.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
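

# Minimal shape-check sketch, assuming NHWC inputs as used throughout above:
if __name__ == "__main__":
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    x = jnp.ones((1, 8, 8, 32))  # (batch, height, width, channels)
    temb = jnp.ones((1, 128))  # timestep embedding
    params = block.init(jax.random.PRNGKey(0), x, temb)
    out = block.apply(params, x, temb)
    print(out.shape)  # (1, 8, 8, 64) -- the 1x1 shortcut handles 32 -> 64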
| 13 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A single binary-tree node holding an integer value."""

    def __init__(self, value):
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums every node value in a binary tree via depth-first search."""

    def __init__(self, tree):
        self.tree = tree

    def depth_first_search(self, node):
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
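
# Usage sketch, assuming the two classes above:
if __name__ == "__main__":
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(next(iter(BinaryTreeNodeSum(root))))  # 10 + 5 + (-3) == 12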
| 706 |
"""simple docstring"""
ROMAN = [
(1_0_0_0, 'M'),
(9_0_0, 'CM'),
(5_0_0, 'D'),
(4_0_0, 'CD'),
(1_0_0, 'C'),
(9_0, 'XC'),
(5_0, 'L'),
(4_0, 'XL'),
(1_0, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            # A smaller value before a larger one is subtractive (e.g. IV == 4).
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
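
# Worked examples, assuming the two converters above:
# roman_to_int("MMXXIV") -> 2024
# int_to_roman(2024) -> "MMXXIV"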
| 545 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True when ``number`` is a perfect square."""
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add three fractions and return the sum in lowest terms."""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
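
# Reading of the four branches in solution(), derived from the code itself:
# with fractions x, y, z, the cases labelled n=1, n=2, n=-1 and n=-2 solve
# x**n + y**n == z**n, i.e. z = x + y, z**2 = x**2 + y**2, 1/z = 1/x + 1/y and
# 1/z**2 = 1/x**2 + 1/y**2; the perfect-square checks guard the two squared
# cases before taking integer square roots.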
| 449 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename flax keys and transpose tensors to match the PyTorch layout."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key_tuple)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto")
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 449 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """A training module for NER; see BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ["xlnet"]), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ["xlnet"]), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size)
    def validation_step(self, batch, batch_nb):
        """Compute validation""" ""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)")
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--labels", default="", type=str, help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.", )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none", )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 635 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "sew"
def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ):
"""simple docstring"""
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : str = hidden_size
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation
_SCREAMING_SNAKE_CASE : Dict = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : str = conv_bias
_SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim)
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = intermediate_size
_SCREAMING_SNAKE_CASE : str = squeeze_factor
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : str = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = hidden_dropout
_SCREAMING_SNAKE_CASE : Tuple = attention_dropout
_SCREAMING_SNAKE_CASE : int = activation_dropout
_SCREAMING_SNAKE_CASE : Any = feat_proj_dropout
_SCREAMING_SNAKE_CASE : str = final_dropout
_SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop
_SCREAMING_SNAKE_CASE : Any = layer_norm_eps
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. """
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
                f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) """
                f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
    def inputs_to_logits_ratio( self ):
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1)
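    # Quick sanity check for the property above (an illustrative sketch, not part of the
    # original file): with the default conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    # the feature encoder downsamples by 5 * 2**6 = 320, i.e.
    #
    #     config = SEWConfig()
    #     assert config.inputs_to_logits_ratio == 320
    #
    # so one logit frame corresponds to 320 input samples (20 ms of audio at 16 kHz).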
| 635 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
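# Note on the two check_use_cache_* helpers above: they verify incremental decoding.
# The cache is primed with `init_cache`, all but the last token is decoded once, then the
# final token is fed together with `past_key_values`, and the last-step logits must match
# a single full `decode` call to within 1e-3. This is the standard equivalence check for
# autoregressive caching in the Flax model ports.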
@require_flax
class FlaxPegasusModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_use_cache_forward( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/pegasus-large' , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@slow
    def test_pegasus_xsum_summary( self ):
        """simple docstring"""
        model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
        tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text , return_tensors='np' , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
        assert tgt_text == decoded
| 85 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution( sides_number: int , dice_number: int ) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(faces_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution( ) -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
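# For reference, this is Project Euler problem 205: Peter rolls nine four-sided dice
# against Colin's six six-sided dice, and the probability that Peter's total beats
# Colin's evaluates to 0.5731441 (rounded to 7 digits), which `solution()` returns.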
if __name__ == "__main__":
print(F'''{solution() = }''')
| 273 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text: str ) -> None:
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(""" """ + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F"{round(-1 * my_fir_sum ):.1f}" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F"{round(-1 * my_sec_sum ):.1f}" )
    # print the difference between them
    print(F"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def analyze_text( text: str ):
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
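# Hypothetical mini-example (not in the original file): for the text "aab",
# analyze_text returns ({'a': 2, 'b': 1}, {' a': 1, 'aa': 1, 'ab': 1}), so the
# first-order entropy printed by calculate_prob is
# -(2/3 * log2(2/3) + 1/3 * log2(1/3)) ~= 0.918 bits per character.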
def main( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 704 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Union[str, Any] = True
__lowercase : List[Any] = True
if hasattr(__a , """use_cache""" ):
__lowercase : Optional[Any] = True
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , """key_length""" , __a )
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = model_class(__a )
__lowercase : Tuple = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
__lowercase : List[Any] = os.path.join(__a , """saved_model""" , """1""" )
__lowercase : str = tf.keras.models.load_model(__a )
__lowercase : Optional[int] = model(__a )
if self.is_encoder_decoder:
__lowercase : Union[str, Any] = outputs["""encoder_hidden_states"""]
__lowercase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
__lowercase : Union[str, Any] = outputs["""hidden_states"""]
__lowercase : List[str] = outputs["""attentions"""]
self.assertEqual(len(__a ) , __a )
__lowercase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(model )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = True
__lowercase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowercase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , """key_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """key_length""" , __a )
def check_decoder_attentions_output(__a : List[str] ):
__lowercase : Union[str, Any] = len(__a )
self.assertEqual(out_len % 2 , 0 )
__lowercase : Any = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a : str ):
__lowercase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : int = True
__lowercase : Any = False
__lowercase : List[Any] = model_class(__a )
__lowercase : Tuple = model(self._prepare_for_class(__a , __a ) )
__lowercase : Dict = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowercase : Any = model_class(__a )
__lowercase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class TFConvBertModelIntegrationTest ( unittest.TestCase ):
    '''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 ) | 649 | 0 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("""T""")
class GraphAdjacencyList ( Generic[T] ):
    """simple docstring"""
    def __init__( self , directed : bool = True ) -> None:
        self.adj_list = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex : T , destination_vertex : T ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
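    # Hypothetical usage sketch (not part of the original file):
    #
    #     g = GraphAdjacencyList[int](directed=False)
    #     g.add_edge(1, 2).add_edge(2, 3)
    #     print(g)   # {1: [2], 2: [1, 3], 3: [2]}
    #
    # `add_edge` returns `self`, so calls can be chained as shown above.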
def __repr__( self : Tuple ) -> str:
return pformat(self.adj_list ) | 67 | '''simple docstring'''
def solution( n = 400_0000 ):
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
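# For the default limit of 4,000,000 this is Project Euler problem 2; the sum of the
# even-valued Fibonacci terms not exceeding the limit is 4613732.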
if __name__ == "__main__":
print(F'{solution() = }')
| 152 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'MobileNetV1Config'
# Base docstring
_CHECKPOINT_FOR_DOC = 'google/mobilenet_v1_1.0_224'
_EXPECTED_OUTPUT_SHAPE = [1, 10_24, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'google/mobilenet_v1_1.0_224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'google/mobilenet_v1_1.0_224',
    'google/mobilenet_v1_0.75_192',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map( model , config , tf_weights=None ):
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification ):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = """MobilenetV1/Conv2d_0/"""
    tf_to_pt_map[prefix + """weights"""] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + """BatchNorm/beta"""] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + """BatchNorm/gamma"""] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = backbone.conv_stem.normalization.running_var
    for i in range(13 ):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        tf_to_pt_map[prefix + """depthwise_weights"""] = pointer.convolution.weight
        tf_to_pt_map[prefix + """BatchNorm/beta"""] = pointer.normalization.bias
        tf_to_pt_map[prefix + """BatchNorm/gamma"""] = pointer.normalization.weight
        tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        tf_to_pt_map[prefix + """weights"""] = pointer.convolution.weight
        tf_to_pt_map[prefix + """BatchNorm/beta"""] = pointer.normalization.bias
        tf_to_pt_map[prefix + """BatchNorm/gamma"""] = pointer.normalization.weight
        tf_to_pt_map[prefix + """BatchNorm/moving_mean"""] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + """BatchNorm/moving_variance"""] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification ):
        prefix = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
        tf_to_pt_map[prefix + """weights"""] = model.classifier.weight
        tf_to_pt_map[prefix + """biases"""] = model.classifier.bias
    return tf_to_pt_map
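# The TF checkpoint stores one depthwise and one pointwise convolution per block, e.g.
# "MobilenetV1/Conv2d_3_depthwise/depthwise_weights" and
# "MobilenetV1/Conv2d_3_pointwise/weights", while the PyTorch model flattens the 13
# blocks into 26 consecutive conv layers -- hence the `pt_index = i * 2` indexing above.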
def load_tf_weights_in_mobilenet_va( model , config , tf_checkpoint_path ):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
            """https://www.tensorflow.org/install/ for installation instructions.""" )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(F'''Loading TF weight {name} with shape {shape}''' )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F'''Importing {name}''' )
        if name not in tf_weights:
            logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("""Transposing depthwise""" )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info("""Transposing""" )
            if len(pointer.shape ) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
        logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
        pointer.data = torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + """/RMSProp""" , None )
        tf_weights.pop(name + """/RMSProp_1""" , None )
        tf_weights.pop(name + """/ExponentialMovingAverage""" , None )
    logger.info(F'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
    return model
def apply_tf_padding( features: torch.Tensor , conv_layer: nn.Conv2d ):
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , """constant""" , 0.0 )
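# Illustrative numbers for the function above (a sketch, not from the original file):
# for an 8x8 input with stride 2 and a 3x3 kernel, in_height % stride_height == 0, so
# pad_along_height = max(3 - 2, 0) = 1, split as pad_top = 0 and pad_bottom = 1. This
# reproduces TensorFlow's "SAME" padding, which pads asymmetrically when needed (the
# extra pixel goes to the bottom/right), unlike PyTorch's symmetric Conv2d padding.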
class MobileNetVaConvLayer (nn.Module ):
    '''simple docstring'''
    def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , ):
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode="""zeros""" , )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward( self , features ):
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class MobileNetVaPreTrainedModel (PreTrainedModel ):
    '''simple docstring'''
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = 'mobilenet_v1'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    def _init_weights( self , module ):
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNorm2d ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
MOBILENET_V1_START_DOCSTRING = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel (MobileNetVaPreTrainedModel ):
    '''simple docstring'''
    def __init__( self , config , add_pooling_layer = True ):
        super().__init__(config )
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13 ):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads( self , heads_to_prune ):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""" )
        hidden_states = self.conv_stem(pixel_values )
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    '\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification (MobileNetVaPreTrainedModel ):
    '''simple docstring'''
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
| 9 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk( checkpoint_path ):
    m2m_100 = torch.load(checkpoint_path , map_location="""cpu""" )
    args = m2m_100["""args"""] or m2m_100["""cfg"""]["""model"""]
    state_dict = m2m_100["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = M2M100ForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
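    # Hypothetical invocation (script name and paths are placeholders, not taken from
    # the original file):
    #
    #     python convert_m2m100_original_checkpoint_to_pytorch.py \
    #         /path/to/model.pt /path/to/output_dir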
| 9 | 1 |
def sum_of_digits( n : int ) -> int:
    """simple docstring"""
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion( n : int ) -> int:
    """simple docstring"""
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact( n : int ) -> int:
    """simple docstring"""
    return sum(int(c ) for c in str(abs(n ) ) )
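# Hypothetical doctest-style check (not in the original file): all three variants agree,
# e.g. sum_of_digits(-123) == sum_of_digits_recursion(-123)
# == sum_of_digits_compact(-123) == 6.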
def benchmark( ) -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""" , setup='''import __main__''' )
        print(F"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )
    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
    print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 14 | """simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__lowercase : Tuple = logging.get_logger(__name__)
class _A ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # resize the shorter edge to size["shortest_edge"], keeping the aspect ratio
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 564 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
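# build the list of (original name, HF name) pairs for the vision encoder and Q-Former weights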
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
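# the remaining Q-Former keys are handled by prefix replacements in convert_blipa_checkpoint below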
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict: the key bias is fixed at zero, so qkv = (q, 0, v)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()
    # each HF model name maps to the (LAVIS model name, model type) pair expected by load_model_and_preprocess
    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)
    image = load_demo_image()
    prompt = "What is unusual about this image?"
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")
    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 466 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
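# TimeSformer extends the ViT-style configuration with video-specific fields (num_frames, attention_type)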
class a ( PretrainedConfig ):
"""simple docstring"""
__lowerCAmelCase = """timesformer"""
def __init__( self , snake_case_=224 , snake_case_=16 , snake_case_=3 , snake_case_=8 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0_2 , snake_case_=1e-6 , snake_case_=True , snake_case_="divided_space_time" , snake_case_=0 , **snake_case_ , ):
'''simple docstring'''
super().__init__(**snake_case_ )
__UpperCAmelCase: Tuple = image_size
__UpperCAmelCase: List[Any] = patch_size
__UpperCAmelCase: Optional[Any] = num_channels
__UpperCAmelCase: int = num_frames
__UpperCAmelCase: List[str] = hidden_size
__UpperCAmelCase: List[str] = num_hidden_layers
__UpperCAmelCase: Dict = num_attention_heads
__UpperCAmelCase: List[str] = intermediate_size
__UpperCAmelCase: Union[str, Any] = hidden_act
__UpperCAmelCase: Dict = hidden_dropout_prob
__UpperCAmelCase: Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase: Union[str, Any] = initializer_range
__UpperCAmelCase: List[Any] = layer_norm_eps
__UpperCAmelCase: int = qkv_bias
__UpperCAmelCase: str = attention_type
__UpperCAmelCase: Tuple = drop_path_rate | 466 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ...     image_embeds=image_emb,\n ...     negative_image_embeds=zero_image_emb,\n ...     height=768,\n ...     width=768,\n ...     num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'''
def downscale_height_and_width(height, width, scale_factor=8):
    # map the requested pixel size to the movq latent size (pixels / scale_factor),
    # rounded up to the next multiple of scale_factor
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
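# e.g. downscale_height_and_width(768, 768) -> (96, 96); 700x700 rounds up to (88, 88)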
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        # keep submodules on CPU and stream them to the GPU only while they run
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')
        device = torch.device(f'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
        device = torch.device(f'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to('''cpu''', silence_dtype_warnings=False)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        """simple docstring"""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            # stack the unconditional and conditional embeddings into a single batch for one unet pass
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                # the unet predicts noise and (learned) variance stacked along the channel dim
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 83 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A ='<<<<<<< This should probably be modified because it mentions: '
__A ='=======\n>>>>>>>\n'
__A =[
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
__A =[
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
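# factory picked up by the CLI: argparse calls it with the parsed arguments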
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand ( BaseDatasetsCLICommand ):
@staticmethod
    def register_subcommand(parser):
        train_parser = parser.add_parser(
            """convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
        train_parser.add_argument(
            """--tfds_path""" , type=str , required=True , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
        train_parser.add_argument(
            """--datasets_directory""" , type=str , required=True , help="""Path to the HuggingFace Datasets folder.""")
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("""datasets-cli/converting""")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''')
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
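        # convert each tfds script, rewriting imports and tfds APIs line by line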
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''')
UpperCAmelCase__ : str = os.path.join(_lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase)
if not os.path.isfile(_lowerCamelCase) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""")
continue
with open(_lowerCamelCase , encoding="""utf-8""") as f:
UpperCAmelCase__ : Optional[Any] = f.readlines()
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[Any] = []
for line in lines:
UpperCAmelCase__ : Optional[Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
UpperCAmelCase__ : str = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
UpperCAmelCase__ : Optional[Any] = """"""
continue
elif "from absl import logging" in out_line:
UpperCAmelCase__ : List[Any] = """from datasets import logging\n"""
elif "getLogger" in out_line:
UpperCAmelCase__ : Optional[int] = out_line.replace("""getLogger""" , """get_logger""")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : List[Any] = list(filter(lambda _lowerCamelCase: e in out_line , _lowerCamelCase))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase) + """\n""")
out_lines.append(_lowerCamelCase)
out_lines.append(_lowerCamelCase)
continue
else:
for pattern, replacement in TO_CONVERT:
UpperCAmelCase__ : Optional[Any] = re.sub(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
UpperCAmelCase__ : List[str] = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , _lowerCamelCase)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(""","""))
UpperCAmelCase__ : Dict = """from . import """ + match.group(1)
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''')
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
UpperCAmelCase__ : int = True
out_lines.append(_lowerCamelCase)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
UpperCAmelCase__ : Optional[Any] = f_name.replace(""".py""" , """""")
UpperCAmelCase__ : Union[str, Any] = os.path.join(_lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : int = os.path.join(_lowerCamelCase , _lowerCamelCase)
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase)
self._logger.info(f'''Adding directory {output_dir}''')
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(_lowerCamelCase)
if needs_manual_update:
with_manual_update.append(_lowerCamelCase)
with open(_lowerCamelCase , """w""" , encoding="""utf-8""") as f:
f.writelines(_lowerCamelCase)
self._logger.info(f'''Converted in {output_file}''')
for utils_file in utils_files:
try:
UpperCAmelCase__ : Optional[int] = os.path.basename(_lowerCamelCase)
UpperCAmelCase__ : int = imports_to_builder_map[f_name.replace(""".py""" , """""")]
self._logger.info(f'''Moving {dest_folder} to {utils_file}''')
shutil.copy(_lowerCamelCase , _lowerCamelCase)
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''')
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''')
| 407 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
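# VisualBERT reuses the BERT text configuration and adds visual_embedding_dim for the incoming visual features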
class VisualBertConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "visual_bert"

    def __init__(self, vocab_size=3_0522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 445 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase ):
    """simple docstring"""

    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True):
        """simple docstring"""
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        # mimic the shortest-edge resize logic of DetrImageProcessor to compute expected output sizes
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
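# exercises DetrImageProcessor on PIL, NumPy and PyTorch inputs, plus COCO detection and panoptic annotations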
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(UpperCamelCase , "rescale_factor" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase , "size" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_pad" ) )
    def test_image_processor_from_dict_with_kwargs(self):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
    def test_batch_feature(self):
"""simple docstring"""
pass
    def test_call_pil(self):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
"""simple docstring"""
# prepare image and target
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"image_id": 3_9769, "annotations": target}
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
lowerCamelCase_ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
"""simple docstring"""
# prepare image, target and masks_path
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
lowerCamelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
lowerCamelCase_ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase ) )
# verify masks
lowerCamelCase_ = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCamelCase )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase ) )
| 445 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=5_12, initializer_range=0.0_2, bos_token_id=0, scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
    def get_config(self):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        '''simple docstring'''
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest( TFModelTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_training(self):
'''simple docstring'''
pass
    def test_training_gradient_checkpointing(self):
'''simple docstring'''
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
    def test_inputs_embeds(self):
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
    def test_save_load_fast_init_from_base(self):
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
    def test_save_load_fast_init_to_base(self):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        '''simple docstring'''
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 555 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
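# the fused self-attention in_proj weights/biases are deliberately not in this list;
# read_in_q_k_v below splits them into separate q/k/v projection matrices instead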
def rename_key(state_dict, old, new):
    """Pop a key from the state dict and re-insert it under its new name."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Rename the torchvision backbone keys to the conv_encoder naming used by HF."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
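# note on the slicing above: table transformer's d_model is 256, so each fused in_proj tensor
# stacks q, k and v along dim 0 with shape (3 * 256, 256); thirds of 256 rows recover them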
def resize(image, checkpoint_url):
    """Resize the image so its longest side matches the model's expected max size."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
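# minimal usage sketch (hypothetical local file; the checkpoint URL only picks the 800 vs 1000 px target):
#   image = Image.open("page.png").convert("RGB")
#   pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)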
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original checkpoint's weights into our Table Transformer structure."""
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
# create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
if "detection" in checkpoint_url:
_UpperCAmelCase = (1, 1_5, 3)
_UpperCAmelCase = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
_UpperCAmelCase = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
_UpperCAmelCase = (1, 1_2_5, 7)
_UpperCAmelCase = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
_UpperCAmelCase = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , _A , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , _A , atol=1e-4 )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
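# example invocation (script and output names are placeholders, not from the original file):
#   python convert_table_transformer.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection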
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 555 | 1 |
'''simple docstring'''
def binomial_coefficient(n, k):
    result = 1  # keeps the running value of C(n, k)
    # since C(n, k) = C(n, n - k), iterate over the smaller of the two
    if k > (n - k):
        k = n - k
    # calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count):
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n):
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count):
    return catalan_number(node_count) * factorial(node_count)
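# worked example: for node_count = 3, catalan_number(3) = C(6, 3) // 4 = 20 // 4 = 5 binary search trees,
# and binary_tree_count(3) = 5 * 3! = 30 labeled binary trees.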
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 575 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of an Informer model.
    """
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=None, scaling="mean", num_dynamic_real_features=0, num_static_real_features=0, num_static_categorical_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, d_model=64, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, is_encoder_decoder=True, activation_function="gelu", dropout=0.05, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, attention_type="prob", sampling_factor=5, distil=True, **kwargs, ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
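        # this count is folded into feature_size in __init__: lagged copies of the target
        # plus the time/real/static features and the two scaling statistics (loc and scale)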
| 575 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __A :
"""simple docstring"""
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
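        # the hybrid DPT variant embeds patches through a small BiT-style convolutional
        # backbone configured by the dict above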
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __A ( lowerCamelCase__ ,lowerCamelCase__ ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[int] = DPTModelTester(self)
_lowerCamelCase : Tuple = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37)
def __snake_case ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''')
def __snake_case ( self):
"""simple docstring"""
pass
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(a__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_lowerCamelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear))
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(a__)
_lowerCamelCase : Optional[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()]
_lowerCamelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a__)
def __snake_case ( self):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Any = True
if model_class in get_values(a__):
continue
_lowerCamelCase : Optional[int] = model_class(a__)
model.to(a__)
model.train()
_lowerCamelCase : Optional[Any] = self._prepare_for_class(a__ , a__ , return_labels=a__)
_lowerCamelCase : List[Any] = model(**a__).loss
loss.backward()
def __snake_case ( self):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = False
_lowerCamelCase : str = True
if model_class in get_values(a__) or not model_class.supports_gradient_checkpointing:
continue
_lowerCamelCase : Any = model_class(a__)
model.to(a__)
model.gradient_checkpointing_enable()
model.train()
_lowerCamelCase : List[Any] = self._prepare_for_class(a__ , a__ , return_labels=a__)
_lowerCamelCase : Optional[int] = model(**a__).loss
loss.backward()
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Any = _config_zero_init(a__)
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(config=a__)
# Skip the check for the backbone
_lowerCamelCase : Union[str, Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_lowerCamelCase : List[Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
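                    # with _config_zero_init, every trainable parameter outside the separately
                    # handled hybrid backbone should start at exactly 0.0 or 1.0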
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def __snake_case ( self):
"""simple docstring"""
pass
@slow
def __snake_case ( self):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_lowerCamelCase : List[Any] = DPTModel.from_pretrained(a__)
self.assertIsNotNone(a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Dict = '''add'''
with self.assertRaises(a__):
_lowerCamelCase : List[Any] = DPTForDepthEstimation(a__)
def __UpperCAmelCase( ):
_lowerCamelCase : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class __A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : str = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''')
_lowerCamelCase : List[str] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''').to(a__)
_lowerCamelCase : Any = prepare_img()
_lowerCamelCase : str = image_processor(images=a__ , return_tensors='''pt''').to(a__)
# forward pass
with torch.no_grad():
_lowerCamelCase : Tuple = model(**a__)
_lowerCamelCase : List[Any] = outputs.predicted_depth
# verify the predicted depth
_lowerCamelCase : Any = torch.Size((1, 384, 384))
self.assertEqual(predicted_depth.shape , a__)
_lowerCamelCase : List[str] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(a__)
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a__ , atol=1e-4))
| 114 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
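# example of the full mapping for a hypothetical checkpoint key:
#   rename_key("img_encoder.layers.0.blocks.1.norm1.weight")
#   -> "vision_model.encoder.stages.0.layers.1.layer_norm1.weight"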
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __UpperCAmelCase( lowercase_ , lowercase_ , lowercase_="groupvit-gcc-yfcc" , lowercase_=False ):
_lowerCamelCase : Optional[Any] = GroupViTConfig()
_lowerCamelCase : Any = GroupViTModel(lowercase_ ).eval()
_lowerCamelCase : Optional[Any] = torch.load(lowercase_ , map_location='''cpu''' )['''model''']
_lowerCamelCase : List[str] = convert_state_dict(lowercase_ , lowercase_ )
_lowerCamelCase, _lowerCamelCase : List[str] = model.load_state_dict(lowercase_ , strict=lowercase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase_ ) == 0)
# verify result
_lowerCamelCase : Optional[Any] = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : str = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=lowercase_ , padding=lowercase_ , return_tensors='''pt''' )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**lowercase_ )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : Dict = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Tuple = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , lowercase_ , atol=1e-3 )
processor.save_pretrained(lowercase_ )
model.save_pretrained(lowercase_ )
print('''Successfully saved processor and model to''' , lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowercase_ , organization='''nielsr''' )
model.push_to_hub(lowercase_ , organization='''nielsr''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 114 | 1 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def __lowercase( UpperCAmelCase__ = "https://www.worldometers.info/coronavirus/" ) -> Any:
"""simple docstring"""
lowerCamelCase = "//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(UpperCAmelCase__ ).content ).xpath(UpperCAmelCase__ ) )
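# the page renders total cases, deaths and recoveries as the three "maincounter-number"
# divs, in that order, which is what the namedtuple fields above rely on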
fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats())) | 703 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/rembert': 2_5_6,
}
SPIECE_UNDERLINE = "▁"
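# SentencePiece marks word-initial pieces with this meta symbol; keeping it here mirrors
# the slow tokenizer's module (an assumption based on the usual sentencepiece convention)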
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer, backed by HuggingFace's tokenizers library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        # the mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,) | 484 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    Checks that each `by_feature` example only differs from its complete counterpart
    in the expected, feature-specific lines.
    """
    def one_complete_example(self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name, feature_script=item, tested_section="main()" if parser_only else "training_function()", ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename)
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False
@classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
def lowerCamelCase__ (self : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def lowerCamelCase__ (self : List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
lowercase__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
lowercase__ = run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase )
self.assertNotIn("""epoch 0:""" , _UpperCAmelCase )
self.assertIn("""epoch 1:""" , _UpperCAmelCase )
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
lowercase__ = run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase )
if torch.cuda.is_available():
lowercase__ = torch.cuda.device_count()
else:
lowercase__ = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , _UpperCAmelCase )
self.assertIn("""epoch 1:""" , _UpperCAmelCase )
else:
self.assertIn("""epoch 0:""" , _UpperCAmelCase )
self.assertIn("""epoch 1:""" , _UpperCAmelCase )
@slow
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
lowercase__ = run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase )
lowercase__ = re.findall("""({.+})""" , _UpperCAmelCase )
lowercase__ = [r for r in results if """accuracy""" in r][-1]
lowercase__ = ast.literal_eval(_UpperCAmelCase )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def lowerCamelCase__ (self : List[Any] ) -> int:
"""simple docstring"""
lowercase__ = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def lowerCamelCase__ (self : List[str] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
lowercase__ = f'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , """tracking""" ) ) )
def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def lowerCamelCase__ (self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 15 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts have the same ordering, so copy weights positionally
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
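    # note: only the largest variant (levit-384) enables stochastic depth
    # (drop_path_rate=0.1); all smaller configs above disable it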
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 74 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")
    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
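# example of the full mapping for a hypothetical checkpoint key:
#   rename_key("patch_embed.0.weight", num_meta4D_last_stage)
#   -> "efficientformer.patch_embed.convolution1.weight"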
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"], )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
if "l1" in model_name:
__SCREAMING_SNAKE_CASE: List[Any] = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
assert torch.allclose(logits[0, :10] , UpperCamelCase__ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__SCREAMING_SNAKE_CASE: Dict = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
assert torch.allclose(logits[0, :10] , UpperCamelCase__ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__SCREAMING_SNAKE_CASE: Union[str, Any] = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(UpperCamelCase__ )
print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add model''' , use_temp_dir=UpperCamelCase__ , )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add image processor''' , use_temp_dir=UpperCamelCase__ , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
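# Usage sketch (hypothetical file names; the flags match the argparse setup above):
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1 \
#       --no-push_to_hub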
| 146 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits: int = 3 ) -> qiskit.result.counts.Counts:
    """Build, run and measure a QFT circuit on the given number of qubits."""
    if isinstance(number_of_qubits , str ):
        raise TypeError('''number of qubits must be a integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''' )
    qr = QuantumRegister(number_of_qubits , '''qr''' )
    cr = ClassicalRegister(number_of_qubits , '''cr''' )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''' )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
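# Usage sketch: starting from |0...0>, the QFT yields an equal superposition, so the
# measured counts are roughly uniform over all basis states (2 qubits -> 4 outcomes
# of ~2_500 each out of 10_000 shots, up to shot noise):
#
#   counts = quantum_fourier_transform(2)
#   print(counts)  # e.g. {'00': 2521, '01': 2480, '10': 2512, '11': 2487}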
| 146 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCamelCase : List[str] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" ,do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" ,strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self ,token_ids_a ,token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
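# Usage sketch (downloads the hosted tokenizer; printed values are illustrative):
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   encoding = tokenizer("Hello world", "How are you?")
#   # token_type_ids are 0 over "[CLS] A [SEP]" and 1 over "B [SEP]", as built above
#   print(encoding["input_ids"], encoding["token_type_ids"])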
| 50 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow's C++ logging; restored from the otherwise-unused os import and the bare '3' assignment
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 50 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image( image: Union[List, PIL.Image.Image, torch.Tensor] ):
    """Convert PIL image(s) or tensors to a normalized torch tensor in [-1, 1]."""
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image


def _preprocess_mask( mask: Union[List, PIL.Image.Image, torch.Tensor] ):
    """Convert PIL mask(s) or tensors to a binary torch tensor."""
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class RePaintPipeline( DiffusionPipeline ):
    '''simple docstring'''
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , image , mask_image , num_inference_steps = 250 , eta = 0.0 , jump_length = 10 , jump_n_sample = 10 , generator = None , output_type = "pil" , return_dict = True , ):
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
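# Usage sketch (model id taken from the diffusers RePaint example; `original` and
# `mask` are PIL images of matching size):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   result = pipe(image=original, mask_image=mask, num_inference_steps=250).images[0]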
| 712 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neo"""] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_gpt_neo"""] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
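# Usage sketch: with the lazy structure above, importing a symbol only loads the
# submodule that defines it, so e.g.
#
#   from transformers.models.gpt_neo import GPTNeoConfig        # config module only
#   from transformers.models.gpt_neo import GPTNeoForCausalLM   # pulls in modeling_gpt_neo (requires torch)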
| 146 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config( PretrainedConfig ):
    model_type = "mobilenet_v1"

    def __init__(self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('''pixel_values''', {0: '''batch'''})] )

    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})] )
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )

    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
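# Usage sketch: the config is a plain constructor, so a width-0.75 / 192px variant is
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   config.save_pretrained("./mobilenet_v1_0.75_192")  # writes config.json (hypothetical path)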
| 521 |
from manim import *
class UpperCamelCase__ ( __lowercase ):
def lowerCAmelCase (self : Any ):
__a : List[Any] = Rectangle(height=0.5 , width=0.5 )
__a : Tuple = Rectangle(height=0.25 , width=0.25 )
__a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a : List[str] = [mem.copy() for i in range(6 )]
__a : Tuple = [mem.copy() for i in range(6 )]
__a : List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : str = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : int = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
__a : int = Text('''CPU''' , font_size=2_4 )
__a : Tuple = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
__a : Optional[Any] = [mem.copy() for i in range(4 )]
__a : str = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : Tuple = Text('''GPU''' , font_size=2_4 )
__a : List[str] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.move_to([-1, -1, 0] )
self.add(snake_case_ )
__a : Union[str, Any] = [mem.copy() for i in range(6 )]
__a : int = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : List[str] = Text('''Model''' , font_size=2_4 )
__a : Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.add(snake_case_ )
__a : List[Any] = []
__a : str = []
__a : Optional[int] = []
for i, rect in enumerate(snake_case_ ):
rect.set_stroke(snake_case_ )
__a : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=snake_case_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=snake_case_ , buff=0.0 )
self.add(snake_case_ )
model_cpu_arr.append(snake_case_ )
self.add(*snake_case_ , *snake_case_ , *snake_case_ )
__a : str = [mem.copy() for i in range(6 )]
__a : Optional[int] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : List[str] = Text('''Loaded Checkpoint''' , font_size=2_4 )
__a : Any = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(snake_case_ )
__a : Tuple = []
__a : Union[str, Any] = []
for i, rect in enumerate(snake_case_ ):
__a : Optional[int] = fill.copy().set_fill(snake_case_ , opacity=0.7 )
target.move_to(snake_case_ )
ckpt_arr.append(snake_case_ )
__a : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(snake_case_ )
self.add(*snake_case_ , *snake_case_ )
__a : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a : List[Any] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case_ , snake_case_ )
__a : Any = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(snake_case_ )
__a : int = MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
__a : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
__a : List[Any] = [meta_mem.copy() for i in range(6 )]
__a : Any = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : Union[str, Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
__a : Tuple = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
__a : List[Any] = Text('''Disk''' , font_size=2_4 )
__a : Union[str, Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(snake_case_ , run_time=3 ) , Write(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) )
__a : Union[str, Any] = []
for i, rect in enumerate(snake_case_ ):
__a : List[str] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(FadeOut(snake_case_ ) )
__a : Optional[int] = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=3 ) )
self.play(
FadeOut(snake_case_ , snake_case_ , *snake_case_ , *snake_case_ ) , )
self.wait()
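# Rendering sketch (an assumption, not part of the original file): with the manim
# CLI installed and this file saved locally, the scene above is rendered with
#
#   manim -pql <this_file>.py UpperCamelCase__
#
# where UpperCamelCase__ is the Scene subclass defined above.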
| 521 | 1 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a , b):
    # compare two initializers ignoring their (unique) names
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto , name , new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i , new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name)
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name)


def _graph_replace_input_with(graph_proto , name , new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name)


def _remove_dup_initializers_from_model(model , model_without_ext , ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder , model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print('unexpected data type: ' , dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model , model , ind_to_replace)

    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name)
    onnx.save(model , new_model)
    return new_model
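# Usage sketch (hypothetical path): deduplicate initializers of an exported model,
# e.g. tied embedding weights that were serialized twice:
#
#   optimized_path = remove_dup_initializers("exported/model.onnx")
#   # writes exported/optimized_model.onnx and returns its path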
| 432 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder( CLIPPreTrainedModel ):
    def __init__( self , config , proj_size=768 ):
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def forward( self , pixel_values , return_uncond_vector=False ):
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states


class PaintByExampleMapper( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='gelu' , attention_bias=True )
                for _ in range(num_layers )
            ] )

    def forward( self , hidden_states ):
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
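# Shape sketch (illustrative): for a CLIP ViT-L/14 vision config (hidden_size=1024,
# num_hidden_layers=24), the mapper stacks (24 + 1) // 5 = 5 transformer blocks and
# the encoder maps pixel_values (batch, 3, 224, 224) to a (batch, 1, proj_size)
# conditioning embedding:
#
#   encoder = PaintByExampleImageEncoder(clip_vision_config, proj_size=768)
#   cond = encoder(pixel_values)                                   # (batch, 1, 768)
#   cond, uncond = encoder(pixel_values, return_uncond_vector=True)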
| 432 | 1 |
def hubble_parameter( hubble_constant: float , radiation_density: float , matter_density: float , dark_energy: float , redshift: float , ) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 559 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("""sample_data.csv""", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="""mean_squared_error""", optimizer="""adam""")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
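    # Evaluation sketch (not in the original script): RMSE over the held-out horizon;
    # values are still in MinMax-scaled units -- invert with the fitted scaler for prices.
    rmse = np.sqrt(np.mean((pred - y_test) ** 2))
    print(f"test RMSE (scaled units): {rmse:.4f}")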
| 559 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_sew"""] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 614 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    """simple docstring"""
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664_694
        config.cell_selection_preference = 0.207_951
        config.huber_loss_delta = 0.121_194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_352_513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4_519
        config.cell_selection_preference = 0.903_421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763_141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F'''Task {task} not supported.''' )
    print(F'''Building PyTorch model from configuration: {config}''' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F'''Save tokenizer files to {pytorch_dump_path}''' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=5_12 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
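# Usage sketch (hypothetical checkpoint paths; the flags match the argparse setup above):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path tapas_wtq/model.ckpt \
#       --tapas_config_file tapas_wtq/bert_config.json \
#       --pytorch_dump_path ./tapas-wtq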
| 614 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig( PretrainedConfig ):
    model_type = 'roberta'

    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 169 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_0_0  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc : float , acceptor_conc : float , intrinsic_conc : float , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
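# Worked example (silicon at T = 300 K, illustrative doping levels): with
# N_d = N_a = 1e17 cm^-3 and n_i = 1.5e10 cm^-3,
# V_bi = (kT/q) * ln(N_d * N_a / n_i**2) ~ 0.026 V * 31.4 ~ 0.81 V
#
#   print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))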
| 169 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict( self ):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "apply_ocr" ) )

    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
def _UpperCamelCase ( self ):
# with apply_OCR = True
lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
# with apply_OCR = False
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 718 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph , v , visited_forward , visited_backward , cst_fwd , cst_bwd , queue , parent , shortest_distance , ):
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij( source , destination , graph_forward , graph_backward ):
    '''simple docstring'''
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
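# Worked example on the graphs above: from "E" to "F" the forward paths are
# E -> B -> C -> D -> F (cost 4) and E -> G -> F (cost 2 + 1 = 3), so
#
#   print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3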
| 73 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: Tuple = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig( PretrainedConfig ):
    model_type = 'yolos'

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation( self ) -> float:
        return 1e-4

    @property
    def default_onnx_opset( self ) -> int:
        return 12
| 20 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """simple docstring"""
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
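

# Illustrative shell usage of the CLI entry point above (hedged: the exact subcommands
# available depend on the installed `accelerate` version):
#   accelerate config           # interactive configuration
#   accelerate env              # print environment info
#   accelerate launch train.py  # launch a script with the configured settings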
| 197 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
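

# Illustrative usage sketch (not part of the original module): the ONNX config above only
# declares `pixel_values`; this prints its dynamic axes when the file is run directly.
if __name__ == "__main__":
    cfg = Data2VecVisionConfig(image_size=224, patch_size=16)
    print(Data2VecVisionOnnxConfig(cfg).inputs)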
| 713 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
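

# Illustrative sketch (not part of the original file), mirroring the doctest in
# _KWARGS_DESCRIPTION: scoring a system against itself should give a perfect score.
#
#   import datasets
#   coval = datasets.load_metric("coval")
#   results = coval.compute(predictions=[words], references=[words])
#   assert results["conll_score"] == 100.0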
| 247 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 31 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 31 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
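

# Illustrative note (not part of the original file): slerp interpolates along the
# great-circle arc between v0 and v1, so intermediate latents keep a sensible norm,
# unlike plain linear interpolation. A hypothetical call:
#   v0, v1 = torch.randn(4), torch.randn(4)
#   mid = slerp(0.5, v0, v1)  # halfway along the arc between v0 and v1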
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
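

# Illustrative usage sketch (not part of the original file). The checkpoint name and the
# `custom_pipeline` identifier below are assumptions for demonstration only:
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="clip_guided_images_mixing_stable_diffusion",
#       clip_model=clip_model,
#       feature_extractor=feature_extractor,
#   )
#   out = pipe(style_image, content_image, style_prompt="an oil painting", content_prompt="a photo")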
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ : int = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase__ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
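

# Illustrative shell usage for this script (hedged: paths are examples; if `save_dir` is
# omitted, it is derived from the checkpoint's parent folder as shown above):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc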
| 676 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string with jieba, then refine each chunk with WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and special boundary tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
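

# Illustrative sketch (not part of the original file): tokenizing with the class above.
# Requires the `jieba` backend and a real vocab file; the path below is hypothetical.
#   tok = CpmAntTokenizer("vocab.txt")
#   ids = tok.encode("今天天气真好")
#   print(tok.decode(ids))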
| 246 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve the Casimir force equation F = (ℏ * c * π² * A) / (240 * d⁴) for whichever of
    force, area or distance is passed as 0; exactly one argument must be 0.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
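

# Illustrative call (not part of the original file): two 4 m^2 plates 1 mm apart.
#   print(casimir_force(force=0, area=4, distance=0.001))
#   -> {'force': ...}  # ≈ 5.2e-15 N, from F = ħcπ²A / (240 d⁴)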
| 246 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
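

# Illustrative usage sketch (not part of the original file); the checkpoint name is an
# assumption for demonstration only:
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")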
| 719 | """simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 0 |
"""simple docstring"""
UpperCamelCase__ :Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def A_ ( ) -> None:
_UpperCamelCase :str = input('''Enter message: ''' )
_UpperCamelCase :List[str] = input('''Enter key [alphanumeric]: ''' )
_UpperCamelCase :Union[str, Any] = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
_UpperCamelCase :Any = '''encrypt'''
_UpperCamelCase :List[str] = encrypt_message(snake_case__ , snake_case__ )
elif mode.lower().startswith('''d''' ):
_UpperCamelCase :Any = '''decrypt'''
_UpperCamelCase :Dict = decrypt_message(snake_case__ , snake_case__ )
print(f"\n{mode.title()}ed message:" )
print(snake_case__ )
def A_ ( snake_case__ , snake_case__ ) -> str:
return translate_message(snake_case__ , snake_case__ , '''encrypt''' )
def A_ ( snake_case__ , snake_case__ ) -> str:
return translate_message(snake_case__ , snake_case__ , '''decrypt''' )
def A_ ( snake_case__ , snake_case__ , snake_case__ ) -> str:
_UpperCamelCase :Any = []
_UpperCamelCase :List[str] = 0
_UpperCamelCase :Dict = key.upper()
for symbol in message:
_UpperCamelCase :int = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(snake_case__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(snake_case__ ):
_UpperCamelCase :Optional[Any] = 0
else:
translated.append(snake_case__ )
return "".join(snake_case__ )
if __name__ == "__main__":
main()
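

# Illustrative round-trip check (not part of the original file):
#   cipher = encrypt_message("LEMON", "Attack at dawn")  # -> "Lxfopv ef rnhr"
#   assert decrypt_message("LEMON", cipher) == "Attack at dawn"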
| 355 |
"""simple docstring"""
def A_ ( snake_case__ , snake_case__ = " " ) -> list:
_UpperCamelCase :List[str] = []
_UpperCamelCase :int = 0
for index, char in enumerate(snake_case__ ):
if char == separator:
split_words.append(string[last_index:index] )
_UpperCamelCase :Dict = index + 1
elif index + 1 == len(snake_case__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 355 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples,
        label_list,
        max_seq_length,
        tokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
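    # Worked illustration of the alignment above (hypothetical tokenizer output):
    #   words  = ["Hugging", "Face"],  labels = ["B-ORG", "I-ORG"]
    #   tokens = ["Hug", "##ging", "Face"]  ->  label_ids = [B-ORG, -100, I-ORG]
    # Only the first sub-token of each word contributes to the loss; the -100
    # positions are skipped by CrossEntropyLoss's ignore_index.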
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token,
                        sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token,
                sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 707 | 0 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives.
        # We extract in the cache dir, and get the extracted path name by hashing the original path.
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
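# Typical usage sketch (the paths below are hypothetical):
#   manager = ExtractManager(cache_dir="/tmp/datasets_cache")
#   extracted_path = manager.extract("/data/archive.tar.gz")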
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
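# Example of the prefix check above: a gzip stream starts with the bytes 0x1F 0x8B, so
# >>> b"\x1f\x8b\x08\x00rest-of-file".startswith(b"\x1F\x8B")
# True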
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
| 39 | 0 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
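# Illustrative mapping produced by rename_key above (the input key is hypothetical):
# >>> rename_key("encoder.blocks.0.attn.proj.weight")
# 'videomae.encoder.layer.0.attention.output.dense.weight'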
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
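# Sketch of the qkv split above (shapes assumed for illustration): a fused
# "qkv.weight" of shape (3 * hidden_size, hidden_size) is cut into equal thirds
# for the separate query / key / value projections, e.g. with hidden_size=768:
#   val[:768, :]      -> query.weight
#   val[768:1536, :]  -> key.weight
#   val[-768:, :]     -> value.weight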
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 222 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
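# Usage sketch (defaults correspond to DeiT-base; the override below is illustrative):
#   config = DeiTConfig(image_size=384)
#   config.num_attention_heads  # 12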
| 222 | 1 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
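    # Illustration of the `-1` padding above (hypothetical 4-token input padded to 6):
    #   global_attention_mask = [1, 0, 0, 1]  ->  [1, 0, 0, 1, -1, -1]   (padding_side="right")
    # `-1` marks padded positions, while `0` already means "local attention" on real tokens.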
| 418 |
'''simple docstring'''
def solution(numerator=3, denominator=7, limit=1000000):
    """Find the numerator of the fraction immediately to the left of
    numerator/denominator among fractions with denominators up to `limit`."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
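# Small worked example: with denominators up to 8, the fraction immediately
# left of 3/7 is 2/5, so:
# >>> solution(3, 7, 8)
# 2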
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
| 418 | 1 |
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
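# Example: 25,000 borrowed at 12% annual interest over 3 years (36 monthly
# payments) gives an installment of roughly 830.36:
# >>> round(equated_monthly_installments(25000, 0.12, 3), 2)
# 830.36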
if __name__ == "__main__":
import doctest
doctest.testmod()
| 627 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 627 | 1 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"

_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"

_KWARGS_DESCRIPTION = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidates should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric(\"code_eval\")\n    >>> test_cases = [\"assert add(2,3)==5\"]\n    >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {'pass@1': 0.5, 'pass@2': 1.0}\n"

_WARNING = "\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"

_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
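# Worked example of the unbiased estimator above: with n=5 samples, c=2 correct
# and k=2, pass@2 = 1 - C(3,2)/C(5,2) = 1 - 3/10 = 0.7, which matches the
# product form 1 - (1 - 2/4) * (1 - 2/5) = 0.7.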
| 604 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
SPIECE_UNDERLINE = "▁"
class __snake_case ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE__ = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs with special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return a list of zeros: BARThez does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece vocabulary file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
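# Hedged usage sketch (kept as comments: it needs network access to the
# Hugging Face Hub and the full `transformers` runtime):
#
#   tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Transformers est génial !").input_ids
#   # ids starts with the <s> (cls) id and ends with </s> (sep), as built above.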
| 604 | 1 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True if num/den is a non-trivial digit-cancelling fraction."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    """Collect all digit-cancelling fractions with `digit_len`-digit numerators."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1

        num += 1
        den = 10

    return solutions


def solution(n: int = 2) -> int:
    """Return the denominator of the product of the digit-cancelling fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
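    # The four non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65
    # and 49/98; their product is 1/100, so `solution()` returns 100.
    assert solution() == 100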
| 43 |
def base16_encode(data: bytes) -> str:
    """Turn every byte into its uppercase two-digit hexadecimal representation."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
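    # Illustrative round-trip in addition to the doctests:
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"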
| 43 | 1 |
'''simple docstring'''
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving (VP) stochastic differential equation scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """Create the continuous timestep schedule used by `step_pred`."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
    def step_pred(self, score, x, t, generator=None):
        """Predict the sample at the previous timestep by reversing the SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean
    def __len__(self) -> int:
        return self.config.num_train_timesteps
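# Minimal usage sketch (comments only; `score_model`, `x` and `generator` are
# hypothetical stand-ins for a score network, a noisy sample batch and an
# optional torch.Generator -- none of them is defined in this module):
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t, generator=generator)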
| 714 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, configuring the library root logger on first use."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library's root logger (it must have been added before)."""
    _configure_library_root_logger()

    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Give every root-logger handler an explicit `[LEVEL|file:line] time >> message` format."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatter of every root-logger handler."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, except that the warning is skipped when the
    TRANSFORMERS_NO_ADVISORY_WARNINGS environment variable is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to `logger.warning()`, but a given message is only emitted once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any requested attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return True if tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
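# Typical usage of this module, sketched as comments (these are the same
# helpers that `transformers.utils.logging` exposes):
#
#   logger = get_logger(__name__)
#   set_verbosity_info()                 # equivalent to set_verbosity(INFO)
#   logger.info("effective level: %d", get_verbosity())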
| 159 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1_024,
'''facebook/bart-large''': 1_024,
'''facebook/bart-large-mnli''': 1_024,
'''facebook/bart-large-cnn''': 1_024,
'''facebook/bart-large-xsum''': 1_024,
'''yjernite/bart_eli5''': 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
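# Hedged usage sketch (comments only; requires Hub access):
#
#   tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   batch = tokenizer(["Hello world"], return_tensors="np")
#   # input_ids begin with <s> and end with </s>, exactly as assembled in
#   # `build_inputs_with_special_tokens` above.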
| 276 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    # NOTE: the destination key templates below are reconstructed from the
    # rename patterns in `rename_key` (HF GroupViT naming); q/k/v are assumed
    # to be stacked in that order in the fused projection.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the original GroupViT weights into the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
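# Example invocation (sketch; the script filename and paths are placeholders):
#
#   python convert_groupvit.py \
#       --checkpoint_path /path/to/groupvit_checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc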
| 276 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
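# Hedged usage sketch (comments only; downloads a checkpoint from the Hub,
# and `image` stands in for a PIL image you provide):
#
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")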
| 362 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 | 1 |
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers: int, devices: list) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
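# Quick illustration of the helpers above: spread 12 layers over 3 devices.
if __name__ == "__main__":
    device_map = get_device_map(12, [0, 1, 2])
    # -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
    assert_device_map(device_map, 12)  # raises ValueError for a bad map
    print(device_map)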
| 298 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
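# Example invocation (sketch; the script filename is a placeholder). The
# dataset name is inferred from the checkpoint's parent directory ("aeslc"):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc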
| 298 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowerCAmelCase :
pass
| 380 |
import os
def solution() -> int:
    """Find the greatest product of four adjacent numbers (in any direction) in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
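    # For the Project Euler grid shipped as grid.txt, this prints 70600674.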
| 380 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 390 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
lowercase__ : Dict = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names into the transformers PP-only naming."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
lowercase__ : Any = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 390 | 1 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 307 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCamelCase = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        self.assertEqual(out_len + 2, len(outputs))

        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_attention_heads, encoder_seq_length, dim],
        )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
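
# Illustrative usage sketch (an addition, not part of the original test file):
# the checkpoint exercised by the integration tests above can be used for
# plain forecasting. Assumes network access to the Hub checkpoint and batch.
if __name__ == "__main__":
    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        outputs = model.generate(
            past_values=batch["past_values"],
            past_time_features=batch["past_time_features"],
            past_observed_mask=batch["past_observed_mask"],
            static_categorical_features=batch["static_categorical_features"],
            future_time_features=batch["future_time_features"],
        )
    # Average the parallel-sample dimension to obtain a point forecast.
    print(outputs.sequences.mean(dim=1)[0, -3:])
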
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picks: int = 20) -> str:
    """Expected number of distinct colours among ``num_picks`` balls drawn
    without replacement from an urn of 70 balls, 10 of each of 7 colours."""
    total = math.comb(NUM_BALLS, num_picks)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
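
# A Monte Carlo cross-check of the closed-form expectation above (an
# illustrative addition, not part of the original solution). With 10**5
# trials the estimate typically agrees to about two decimal places.
import random


def simulate(num_picks: int = 20, trials: int = 10**5) -> float:
    urn = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    distinct_seen = 0
    for _ in range(trials):
        # sample without replacement, then count distinct colours in the draw
        distinct_seen += len(set(random.sample(urn, num_picks)))
    return distinct_seen / trials
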
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer, e.g. 100 -> {2, 5}."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors of ``num``."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check whether all elements of a list are equal (an empty list counts)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first ``n`` consecutive integers with ``n`` unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of ``n`` consecutive integers
    that each have ``n`` distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
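    # Quick sanity checks (illustrative additions, not part of the original
    # solution): 14, 15 is the first consecutive pair with two distinct prime
    # factors each, and 644 = 2**2 * 7 * 23 starts the first run of three.
    assert unique_prime_factors(644) == {2, 7, 23}
    assert solution(2) == 14
    assert solution(3) == 644
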
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor


logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""

    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""

    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""

    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""

    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""

    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
            module.enable_quant()
            module.disable_calib()
        else:
            module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Force the Q/K/V input and weight quantizers of each self-attention block
    to share a single scale factor, taking the max of the three amax values."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized, by
    adjusting the amax of the following input quantizer."""

    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to per-channel, assigning the per-tensor amax to each channel."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the quantization configuration of every weighted layer in the model."""

    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""

    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set an attribute on mod's quantizer submodule, warning if it is missing."""

    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set attributes on the input and/or weight quantizers of mod."""

    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a regex in names."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
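
# Usage sketch (an illustrative addition; the training script, model builder
# and dataloader names are placeholders): these helpers are designed to be
# driven from a quantization-aware fine-tuning script in roughly this order.
#
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)                  # registers the --wprec/--aprec/... flags
#   args = parser.parse_args()
#   set_default_quantizers(args)           # must run before the model is built
#   model = build_model()                  # placeholder for model construction
#   configure_model(model, args, calib=True)
#   enable_calibration(model)
#   for batch in calibration_loader:       # placeholder dataloader
#       model(**batch)                     # collect activation statistics
#   finish_calibration(model, args)        # load amax values, re-enable quant
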
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    r"""
    Constructs a BridgeTower processor which wraps a Roberta tokenizer and a BridgeTower image processor into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
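
# Usage sketch (illustrative; the checkpoint name is the public BridgeTower
# base checkpoint, and the image file is a placeholder):
#
#   from transformers import BridgeTowerProcessor
#   from PIL import Image
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=Image.open("example.jpg"), text="a photo", return_tensors="pt")
#   # inputs now holds input_ids/attention_mask plus pixel_values and pixel_mask
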
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )

    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
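
# Usage sketch (illustrative; the tool downloads the facebook/bart-large-mnli
# checkpoint on first use):
#
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", ["positive", "negative"])
#   # -> "positive"
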
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
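
# Usage sketch (illustrative): instantiating a configuration and a randomly
# initialized model from it, following the usual transformers config pattern.
#
#   from transformers import ViTMSNConfig, ViTMSNModel
#
#   configuration = ViTMSNConfig()     # vit-msn-base style defaults
#   model = ViTMSNModel(configuration)
#   configuration = model.config       # access the config back
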
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
lowercase_ : List[str] = AutoTokenizer.from_pretrained('google/mt5-small' )
lowercase_ : List[str] = tokenizer('Hello there' ,return_tensors='np' ).input_ids
lowercase_ : int = tokenizer('Hi I am' ,return_tensors='np' ).input_ids
lowercase_ : List[str] = shift_tokens_right(__UpperCamelCase ,model.config.pad_token_id ,model.config.decoder_start_token_id )
lowercase_ : str = model(__UpperCamelCase ,decoder_input_ids=__UpperCamelCase ).logits
lowercase_ : str = optax.softmax_cross_entropy(__UpperCamelCase ,onehot(__UpperCamelCase ,logits.shape[-1] ) ).mean()
lowercase_ : str = -(labels.shape[-1] * loss.item())
lowercase_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 720 | """simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__SCREAMING_SNAKE_CASE ="2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
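
# Usage sketch (illustrative; "squad" is simply a well-known public dataset):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("squad", split="train")
#   print(ds[0]["question"])
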
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
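
# Usage sketch (illustrative; minhash_deduplication is the preprocessing
# module the tests above exercise):
#
#   ds = get_dataset()
#   deduped, clusters = deduplicate_dataset(ds)        # drop near-duplicate files
#   clusters_only = make_duplicate_clusters(ds, 0.85)  # jaccard_threshold = 0.85
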
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


# NOTE: the concrete class name was stripped from the source; the structure
# (shortest-edge 256 resize, 224 center crop, ImageNet standard normalization,
# semantic-segmentation post-processing) matches the MobileNetV2 image
# processor, so that name is used here.
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
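
# Usage sketch (illustrative; the image file and target size are placeholders):
#
#   from PIL import Image
#
#   image_processor = MobileNetV2ImageProcessor()
#   pixel_inputs = image_processor(Image.open("example.jpg"), return_tensors="pt")
#   # With a segmentation head, logits can be mapped back to the input size:
#   # maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
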
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
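
# Usage sketch (illustrative; the dataset name and URL are placeholders):
# dataset tests construct a MockDownloadManager so that builder scripts
# resolve their URLs against a local dummy_data.zip instead of the network.
#
#   mock_dl_manager = MockDownloadManager(
#       dataset_name="my_dataset",
#       config=None,
#       version=Version("1.0.0"),
#       use_local_dummy_data=True,
#   )
#   local_paths = mock_dl_manager.download_and_extract({"train": "https://example.com/train.csv"})
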
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class a_ :
def __init__(self , __a = None) -> None:
"""simple docstring"""
if components is None:
__snake_case : List[str] = []
__snake_case : Optional[int] = list(__a)
def __len__(self) -> int:
"""simple docstring"""
return len(self.__components)
def __str__(self) -> str:
"""simple docstring"""
return "(" + ",".join(map(__a , self.__components)) + ")"
def __add__(self , __a) -> Vector:
"""simple docstring"""
__snake_case : Optional[Any] = len(self)
if size == len(__a):
__snake_case : Optional[int] = [self.__components[i] + other.component(__a) for i in range(__a)]
return Vector(__a)
else:
raise Exception('must have the same size')
def __sub__(self , __a) -> Vector:
"""simple docstring"""
__snake_case : Optional[Any] = len(self)
if size == len(__a):
__snake_case : Optional[int] = [self.__components[i] - other.component(__a) for i in range(__a)]
return Vector(__a)
else: # error case
raise Exception('must have the same size')
@overload
def __mul__(self , __a) -> Vector:
"""simple docstring"""
...
@overload
def __mul__(self , __a) -> float:
"""simple docstring"""
...
def __mul__(self , __a) -> float | Vector:
"""simple docstring"""
if isinstance(__a , (float, int)):
__snake_case : str = [c * other for c in self.__components]
return Vector(__a)
elif isinstance(__a , __a) and len(self) == len(__a):
__snake_case : List[Any] = len(self)
__snake_case : Dict = [self.__components[i] * other.component(__a) for i in range(__a)]
return sum(__a)
else: # error case
raise Exception('invalid operand!')
def SCREAMING_SNAKE_CASE__ (self) -> Vector:
"""simple docstring"""
return Vector(self.__components)
def SCREAMING_SNAKE_CASE__ (self , __a) -> float:
"""simple docstring"""
if isinstance(__a , __a) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception('index out of range')
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None:
"""simple docstring"""
assert -len(self.__components) <= pos < len(self.__components)
__snake_case : int = value
def SCREAMING_SNAKE_CASE__ (self) -> float:
"""simple docstring"""
if len(self.__components) == 0:
raise Exception('Vector is empty')
__snake_case : Tuple = [c**2 for c in self.__components]
return math.sqrt(sum(__a))
def SCREAMING_SNAKE_CASE__ (self , __a , __a = False) -> float:
"""simple docstring"""
__snake_case : Tuple = self * other
__snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def _SCREAMING_SNAKE_CASE ( A : int ) -> Vector:
"""simple docstring"""
assert isinstance(A , A )
return Vector([0] * dimension )
def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Vector:
"""simple docstring"""
assert isinstance(A , A ) and (isinstance(A , A ))
__snake_case : Any = [0] * dimension
__snake_case : int = 1
return Vector(A )
def _SCREAMING_SNAKE_CASE ( A : float , A : Vector , A : Vector ) -> Vector:
"""simple docstring"""
assert (
isinstance(A , A )
and isinstance(A , A )
and (isinstance(A , (int, float) ))
)
return x * scalar + y
def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> Vector:
"""simple docstring"""
random.seed(A )
__snake_case : List[Any] = [random.randint(A , A ) for _ in range(A )]
return Vector(A )
class a_ :
def __init__(self , __a , __a , __a) -> None:
"""simple docstring"""
__snake_case : Union[str, Any] = matrix
__snake_case : int = w
__snake_case : str = h
def __str__(self) -> str:
"""simple docstring"""
__snake_case : Dict = ''
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__(self , __a) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__snake_case : Tuple = []
for i in range(self.__height):
__snake_case : List[Any] = [
self.__matrix[i][j] + other.component(__a , __a)
for j in range(self.__width)
]
matrix.append(__a)
return Matrix(__a , self.__width , self.__height)
else:
raise Exception('matrix must have the same dimension!')
def __sub__(self , __a) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__snake_case : str = []
for i in range(self.__height):
__snake_case : List[str] = [
self.__matrix[i][j] - other.component(__a , __a)
for j in range(self.__width)
]
matrix.append(__a)
return Matrix(__a , self.__width , self.__height)
else:
raise Exception('matrices must have the same dimension!')
@overload
def __mul__(self , __a) -> Matrix:
"""simple docstring"""
...
@overload
def __mul__(self , __a) -> Vector:
"""simple docstring"""
...
def __mul__(self , __a) -> Vector | Matrix:
"""simple docstring"""
if isinstance(__a , __a): # matrix-vector
if len(__a) == self.__width:
__snake_case : Tuple = zero_vector(self.__height)
for i in range(self.__height):
__snake_case : Union[str, Any] = [
self.__matrix[i][j] * other.component(__a)
for j in range(self.__width)
]
ans.change_component(__a , sum(__a))
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!')
elif isinstance(__a , (int, float)): # matrix-scalar
__snake_case : str = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(__a , self.__width , self.__height)
return None
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
return self.__height
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
return self.__width
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds')
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
__snake_case : List[Any] = value
else:
raise Exception('change_component: indices out of bounds')
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square')
__snake_case : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__a)):
__snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__a , self.__width - 1 , self.__height - 1).determinant()
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square')
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__a , __a)
else:
raise Exception('Indices out of bounds')
def SCREAMING_SNAKE_CASE__ (self) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square')
if self.__height < 1:
raise Exception('Matrix has no element')
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__snake_case : Any = [
self.__matrix[0][y] * self.cofactor(0 , __a) for y in range(self.__width)
]
return sum(__a)
def square_zero_matrix(n: int) -> Matrix:
    """Return an ``n x n`` matrix filled with zeros."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a ``height x width`` matrix of random integers in ``[a, b]``."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
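
# Minimal usage sketch (illustrative values):
#     m = Matrix([[1, 2], [3, 4]], 2, 2)
#     m.determinant()   # -> -2
#     m.minor(0, 0)     # -> 4
#     m.cofactor(0, 1)  # -> -3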
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
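# _import_structure maps each submodule to its exported names; the _LazyModule
# registered at the bottom of the file performs the real imports on first access.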
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
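# `parse_known_args` leaves unrecognized "--key value" tokens untouched; the helper
# below folds them into a kwargs dict that is forwarded to the selected command.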
def parse_unknown_args(unknown_args: list) -> dict:
    """Turn alternating ["--key", "value", ...] tokens into a {key: value} dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main() -> None:
    """Entry point for the `datasets-cli` tool."""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
from ..utils import DummyObject, requires_backends
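# Placeholder classes for when Flax is not installed: instantiating any of these or
# calling their classmethods raises a clear "requires the flax backend" error via
# requires_backends. (The original class names were scrambled to generic placeholders
# in this dump, so only the metaclass and error behavior are meaningful here.)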
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
def actual_power(a: int, b: int) -> int:
    """Compute a**b for b >= 0 by recursive exponentiation by squaring."""
    if b == 0:
        return 1
    # compute the half power once so the recursion costs O(log b) multiplications
    half = actual_power(a, b // 2)
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute a**b, handling negative exponents via the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
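    # Expected output: -0.125, i.e. 1 / (-2) ** 3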
def binomial_coefficient(n: int, k: int) -> int:
    """Compute C(n, k) iteratively, without large intermediate factorials."""
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the node_count-th Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n! for n >= 0."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of labeled binary trees: catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
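
# Worked example: for node_count = 3, C(6, 3) // 4 = 20 // 4 = 5 binary search trees,
# and 5 * 3! = 30 distinct labeled binary trees.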
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # each up/down block in the UNet halves the temporal resolution once
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""")

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ''' process.''')
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
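
# Usage sketch (illustrative; requires a trained checkpoint such as harmonai/maestro-150k):
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audio = pipe(audio_length_in_s=4.0).audios[0]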
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """A fast tokenizer whose matching slow implementation is CustomTokenizer."""
    slow_tokenizer_class = CustomTokenizer
pass
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
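
# Example invocation (illustrative values):
#     python convert_stable_diffusion_checkpoint_to_onnx.py \
#         --model_path <hub-id-or-local-dir> --output_path ./sd-onnx --opset 14 --fp16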
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    """Record, in execution order, the leaf modules touched by a forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # record only parameter-bearing leaves: modules without children, plus convs and batch norms
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
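
# ModuleTransfer below relies on both models executing the same parameter-bearing
# ops in the same order; any architectural mismatch surfaces as a length mismatch.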
@dataclass
class ModuleTransfer:
    """Copy weights from ``src`` into ``dest`` by zipping their traced leaf modules."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced)} operations while'''
                f''' destination module has {len(dest_traced)}.''' )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'''Transferred from={src_m} to={dest_m}''' )
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Convert a single timm ResNet checkpoint into the HF format and optionally push it."""
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 2_2_4, 2_2_4))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = F'''resnet{'-'.join(name.split('resnet' ) )}'''
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )

        print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named ResNet (or all supported ones when model_name is None)."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_0_0_0
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2], layer_type="basic"),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8], layer_type="bottleneck"),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2], layer_type="basic"),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8], layer_type="bottleneck"),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 2_3, 3], hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8], layer_type="bottleneck"),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 3_6, 3], hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8], layer_type="bottleneck"),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
            'The name of the model you wish to convert, it must be one of the supported resnet* architectures,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _UpperCamelCase (unittest.TestCase ):
def __UpperCAmelCase ( self )-> List[Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
__lowerCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids , __UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCAmelCase ( self )-> Tuple:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__lowerCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def __UpperCAmelCase ( self )-> List[Any]:
__lowerCAmelCase = [[1, 2, 3], [1, 2, 4]]
__lowerCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
__lowerCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
__lowerCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 )
__lowerCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCAmelCase ( self )-> int:
__lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__lowerCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
def stooge_sort(arr: list) -> list:
    """Sort ``arr`` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t))
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, (h))
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t))
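
# Stooge sort runs in O(n^(log 3 / log 1.5)) ≈ O(n^2.7) time, so it is a teaching
# curiosity rather than a practical sorting algorithm.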
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(stooge_sort(unsorted))
"""A disjoint-set (union-find) variant that tracks the size of the largest set."""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize from a list giving the number of items in each set; all ranks start at 1."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union by rank; return False if ``src`` and ``dst`` are already in the same set."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the set's representative, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
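
# Usage sketch (illustrative):
#     ds = DisjointSet([1, 1, 1])  # three singleton sets, each of size 1
#     ds.merge(0, 1)               # -> True; ds.max_set is now 2
#     ds.merge(1, 0)               # -> False; already in the same set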
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed: list, total: int) -> None:
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N;
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask: int, task_no: int) -> int:
        # if mask == self.final_mask, every person has been assigned a task: one valid way
        if mask == self.final_mask:
            return 1
        # if not everyone has a task yet and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case has already been computed, reuse it
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # number of ways when this task is not assigned to anyone
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the task to every person who can do it and is still free,
        # and recursively assign the remaining tasks
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p has already been given a task
                if mask & (1 << p):
                    continue
                # assign this task to p, update the mask, and recurse
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # memoize the value
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed: list) -> int:
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # fill the DP table; the final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
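
# Worked example below: persons 0, 1 and 2 can perform tasks {1, 3, 4}, {1, 2, 5}
# and {3, 4} respectively; there are 10 ways to give every person a distinct task.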
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
"""simple docstring"""
import os
def solution():
    """Return the maximum top-to-bottom path sum of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, '''triangle.txt''')

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' '''):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Bottom-up DP: each cell accumulates the best path sum from the row above.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
    print(solution())
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
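# Same lazy-import pattern as other transformers subpackages: list the exports
# here and let _LazyModule import the torch/flax implementations on demand.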
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a ( unittest.TestCase , _lowerCamelCase ):
def A_ ( self : List[str] ):
        self.tool = load_tool('''text-to-speech''')
self.tool.setup()
def A_ ( self : List[str] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
        result = self.tool('''hey''')
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def A_ ( self : List[str] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
        result = self.tool('''hey''')
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = 'table-transformer'
_lowerCamelCase : Any = ['past_key_values']
_lowerCamelCase : Optional[int] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Dict , UpperCAmelCase : str=True , UpperCAmelCase : str=None , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : List[str]=100 , UpperCAmelCase : Union[str, Any]=6 , UpperCAmelCase : Optional[int]=2048 , UpperCAmelCase : Any=8 , UpperCAmelCase : Union[str, Any]=6 , UpperCAmelCase : str=2048 , UpperCAmelCase : Dict=8 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : int=True , UpperCAmelCase : Any="relu" , UpperCAmelCase : int=256 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : str=0.02 , UpperCAmelCase : List[str]=1.0 , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Dict="sine" , UpperCAmelCase : List[str]="resnet50" , UpperCAmelCase : str=True , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : Union[str, Any]=5 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Dict=1 , UpperCAmelCase : Tuple=1 , UpperCAmelCase : Tuple=5 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Optional[int]=0.1 , **UpperCAmelCase : int , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
A_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = backbone_config.get("model_type" )
A_ = CONFIG_MAPPING[backbone_model_type]
A_ = config_class.from_dict(UpperCAmelCase )
# set timm attributes to None
A_ , A_ , A_ = None, None, None
A_ = use_timm_backbone
A_ = backbone_config
A_ = num_channels
A_ = num_queries
A_ = d_model
A_ = encoder_ffn_dim
A_ = encoder_layers
A_ = encoder_attention_heads
A_ = decoder_ffn_dim
A_ = decoder_layers
A_ = decoder_attention_heads
A_ = dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = activation_function
A_ = init_std
A_ = init_xavier_std
A_ = encoder_layerdrop
A_ = decoder_layerdrop
A_ = encoder_layers
A_ = auxiliary_loss
A_ = position_embedding_type
A_ = backbone
A_ = use_pretrained_backbone
A_ = dilation
# Hungarian matcher
A_ = class_cost
A_ = bbox_cost
A_ = giou_cost
# Loss coefficients
A_ = mask_loss_coefficient
A_ = dice_loss_coefficient
A_ = bbox_loss_coefficient
A_ = giou_loss_coefficient
A_ = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def __A ( self : int ):
return self.encoder_attention_heads
@property
def __A ( self : int ):
return self.d_model
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = version.parse('1.11' )
@property
def __A ( self : Any ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def __A ( self : Any ):
return 1E-5
@property
def __A ( self : List[Any] ):
        return 12
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Any:
lowerCAmelCase = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
lowerCAmelCase = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(lowercase )
from datasets import load_dataset
lowerCAmelCase = load_dataset("""nielsr/rvlcdip-demo""" )
lowerCAmelCase = dataset["""train"""][0]["""image"""].convert("""RGB""" )
lowerCAmelCase = image_processor(lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**lowercase )
lowerCAmelCase = outputs.logits
lowerCAmelCase = torch.Size((1, 16) )
self.assertEqual(logits.shape , lowercase )
lowerCAmelCase = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=lowercase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase , atol=1e-4 ) )
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    """Dataclass field whose default is a fresh copy of ``default`` (needed for mutable defaults)."""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
__snake_case : List[str] = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
__snake_case : List[int] = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
__snake_case : List[int] = list_field(
default=[8, 32, 1_28, 5_12] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
__snake_case : bool = field(
default=snake_case__ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
__snake_case : bool = field(
default=snake_case__ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
__snake_case : bool = field(
default=snake_case__ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
__snake_case : bool = field(default=snake_case__ , metadata={"""help""": """Use FP16 to accelerate inference."""} )
__snake_case : bool = field(default=snake_case__ , metadata={"""help""": """Benchmark training of model"""} )
__snake_case : bool = field(default=snake_case__ , metadata={"""help""": """Verbose memory tracing"""} )
__snake_case : bool = field(
default=snake_case__ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
__snake_case : bool = field(
default=snake_case__ , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
__snake_case : bool = field(default=snake_case__ , metadata={"""help""": """Trace memory line by line"""} )
__snake_case : bool = field(default=snake_case__ , metadata={"""help""": """Save result to a CSV file"""} )
__snake_case : bool = field(default=snake_case__ , metadata={"""help""": """Save all print statements in a log file"""} )
__snake_case : bool = field(default=snake_case__ , metadata={"""help""": """Whether to print environment information"""} )
__snake_case : bool = field(
default=snake_case__ , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
__snake_case : str = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
__snake_case : str = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
__snake_case : str = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
__snake_case : str = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
__snake_case : str = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving environment information."""} , )
__snake_case : str = field(
default=F'''log_{round(time() )}.csv''' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
__snake_case : int = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
__snake_case : bool = field(
default=snake_case__ , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def __lowercase ( self :List[Any] ):
warnings.warn(
f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , UpperCamelCase_ , )
def __lowercase ( self :Union[str, Any] ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __lowercase ( self :Tuple ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def __lowercase ( self :Optional[int] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__snake_case : Union[str, Any] = LDMTextToImagePipeline
__snake_case : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
__snake_case : str = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
__snake_case : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case : Optional[Any] = False
def __lowercase ( self :List[str] ):
torch.manual_seed(0 )
__lowerCamelCase : str =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__lowerCamelCase : str =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] =AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase : Any =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowerCamelCase : Optional[int] =CLIPTextModel(__lowercase )
__lowerCamelCase : Dict =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase : Optional[int] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vqvae''': vae,
'''bert''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def __lowercase ( self :int , __lowercase :Optional[int] , __lowercase :Optional[Any]=0 ):
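        # torch.Generator does not support the mps device, so on Apple silicon the
        # tests fall back to seeding the global RNG instead.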
if str(__lowercase ).startswith('''mps''' ):
__lowerCamelCase : Any =torch.manual_seed(__lowercase )
else:
__lowerCamelCase : str =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCamelCase : Any ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self :List[str] ):
__lowerCamelCase : List[str] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : str =self.get_dummy_components()
__lowerCamelCase : Optional[int] =LDMTextToImagePipeline(**__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCamelCase : str =self.get_dummy_inputs(__lowercase )
__lowerCamelCase : List[Any] =pipe(**__lowercase ).images
__lowerCamelCase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__lowerCamelCase : Optional[Any] =np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self :int , __lowercase :Any , __lowercase :Optional[int]=torch.floataa , __lowercase :Dict=0 ):
__lowerCamelCase : List[str] =torch.manual_seed(__lowercase )
__lowerCamelCase : List[str] =np.random.RandomState(__lowercase ).standard_normal((1, 4, 32, 32) )
__lowerCamelCase : List[str] =torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__lowerCamelCase : Any ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self :Tuple ):
__lowerCamelCase : int =LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCamelCase : Tuple =self.get_inputs(__lowercase )
__lowerCamelCase : Optional[Any] =pipe(**__lowercase ).images
__lowerCamelCase : Union[str, Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
__lowerCamelCase : Union[str, Any] =np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] )
__lowerCamelCase : Dict =np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self :Dict , __lowercase :Optional[Any] , __lowercase :int=torch.floataa , __lowercase :Dict=0 ):
__lowerCamelCase : Any =torch.manual_seed(__lowercase )
__lowerCamelCase : Dict =np.random.RandomState(__lowercase ).standard_normal((1, 4, 32, 32) )
__lowerCamelCase : str =torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__lowerCamelCase : Dict ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self :Tuple ):
__lowerCamelCase : Optional[int] =LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCamelCase : List[Any] =self.get_inputs(__lowercase )
__lowerCamelCase : Optional[int] =pipe(**__lowercase ).images[0]
__lowerCamelCase : Optional[int] =load_numpy(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
__lowerCamelCase : Dict =np.abs(expected_image - image ).max()
assert max_diff < 1e-3
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class snake_case_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: torch.FloatTensor
class snake_case_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@register_to_config
def __init__( self , __a = 32 , __a = 64 , __a = 20 , __a = 768 , __a=77 , __a=4 , __a = 0.0 , __a = "silu" , __a = None , __a = None , __a = "linear" , __a = "prd" , __a = None , __a = None , __a = None , ):
"""simple docstring"""
super().__init__()
A__ = num_attention_heads
A__ = attention_head_dim
A__ = num_attention_heads * attention_head_dim
A__ = additional_embeddings
A__ = time_embed_dim or inner_dim
A__ = embedding_proj_dim or embedding_dim
A__ = clip_embed_dim or embedding_dim
A__ = Timesteps(__a , __a , 0 )
A__ = TimestepEmbedding(__a , __a , out_dim=__a , act_fn=__a )
A__ = nn.Linear(__a , __a )
if embedding_proj_norm_type is None:
A__ = None
elif embedding_proj_norm_type == "layer":
A__ = nn.LayerNorm(__a )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
A__ = nn.Linear(__a , __a )
if encoder_hid_proj_type is None:
A__ = None
elif encoder_hid_proj_type == "linear":
A__ = nn.Linear(__a , __a )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
A__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __a ) )
if added_emb_type == "prd":
A__ = nn.Parameter(torch.zeros(1 , 1 , __a ) )
elif added_emb_type is None:
A__ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
A__ = nn.ModuleList(
[
BasicTransformerBlock(
__a , __a , __a , dropout=__a , activation_fn='gelu' , attention_bias=__a , )
for d in range(__a )
] )
if norm_in_type == "layer":
A__ = nn.LayerNorm(__a )
elif norm_in_type is None:
A__ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
A__ = nn.LayerNorm(__a )
A__ = nn.Linear(__a , __a )
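        # Additive causal mask: large negative values strictly above the diagonal
        # prevent each position from attending to later positions.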
A__ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
A__ = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , __a , persistent=__a )
A__ = nn.Parameter(torch.zeros(1 , __a ) )
A__ = nn.Parameter(torch.zeros(1 , __a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = {}
def fn_recursive_add_processors(__a , __a , __a ):
if hasattr(__a , 'set_processor' ):
A__ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , __a , __a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__a , __a , __a )
return processors
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = len(self.attn_processors.keys() )
if isinstance(__a , __a ) and len(__a ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(__a )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(__a , __a , __a ):
if hasattr(__a , 'set_processor' ):
if not isinstance(__a , __a ):
module.set_processor(__a )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , __a , __a )
for name, module in self.named_children():
fn_recursive_attn_processor(__a , __a , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def _UpperCAmelCase ( self , __a , __a , __a , __a = None , __a = None , __a = True , ):
"""simple docstring"""
A__ = hidden_states.shape[0]
A__ = timestep
if not torch.is_tensor(__a ):
A__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__a ) and len(timesteps.shape ) == 0:
A__ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A__ = timesteps * torch.ones(__a , dtype=timesteps.dtype , device=timesteps.device )
A__ = self.time_proj(__a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
A__ = timesteps_projected.to(dtype=self.dtype )
A__ = self.time_embedding(__a )
if self.embedding_proj_norm is not None:
A__ = self.embedding_proj_norm(__a )
A__ = self.embedding_proj(__a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
A__ = self.encoder_hidden_states_proj(__a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
A__ = self.proj_in(__a )
A__ = self.positional_embedding.to(hidden_states.dtype )
A__ = []
A__ = 0
if encoder_hidden_states is not None:
additional_embeds.append(__a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
A__ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
A__ = hidden_states[:, None, :]
A__ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
A__ = self.prd_embedding.to(hidden_states.dtype ).expand(__a , -1 , -1 )
additional_embeds.append(__a )
A__ = torch.cat(
__a , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
A__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
A__ = F.pad(
__a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
A__ = hidden_states + positional_embeddings
if attention_mask is not None:
A__ = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
A__ = F.pad(__a , (0, self.additional_embeddings) , value=0.0 )
A__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
A__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
A__ = self.norm_in(__a )
for block in self.transformer_blocks:
A__ = block(__a , attention_mask=__a )
A__ = self.norm_out(__a )
if self.prd_embedding is not None:
A__ = hidden_states[:, -1]
else:
A__ = hidden_states[:, additional_embeddings_len:]
A__ = self.proj_to_clip_embeddings(__a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__a )
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 260 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the IF image-to-image super-resolution pipeline."""

    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 477 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
logger = logging.get_logger(__name__)


class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    """Deprecated alias kept for backwards compatibility; use the image processor class instead."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
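# A small sketch (an addition, not part of the original module) showing how the
# deprecation shim above behaves: instantiating the feature extractor emits a
# FutureWarning that callers can catch or silence while migrating.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        LayoutLMvaFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)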
| 203 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    """
    Take in two positive integers and return the binary string of `number`
    logically left-shifted by `shift_amount` bits.

    >>> logical_left_shift(0, 1)
    '0b00'
    >>> logical_left_shift(1, 1)
    '0b10'
    >>> logical_left_shift(1, 5)
    '0b100000'
    >>> logical_left_shift(17, 2)
    '0b1000100'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """
    Take in two positive integers and return the binary string of `number`
    logically right-shifted by `shift_amount` bits.

    >>> logical_right_shift(0, 1)
    '0b0'
    >>> logical_right_shift(1, 1)
    '0b0'
    >>> logical_right_shift(1024, 2)
    '0b100000000'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """
    Take in integers `number` and `shift_amount` and return the two's-complement
    binary string of `number` arithmetically right-shifted by `shift_amount` bits
    (the sign bit is replicated on the left).

    >>> arithmetic_right_shift(0, 1)
    '0b00'
    >>> arithmetic_right_shift(1, 1)
    '0b00'
    >>> arithmetic_right_shift(-1, 1)
    '0b11'
    >>> arithmetic_right_shift(17, 2)
    '0b000100'
    >>> arithmetic_right_shift(-17, 2)
    '0b111011'
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
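# Quick sanity check (an addition, not part of the original algorithm file): for any
# integer, parsing the two's-complement string returned by arithmetic_right_shift as a
# signed value should agree with Python's built-in arithmetic right shift operator.
def _as_signed(bit_string: str) -> int:
    bits = bit_string[2:]  # strip the '0b' prefix
    value = int(bits, 2)
    return value - (1 << len(bits)) if bits[0] == "1" else value


if __name__ == "__main__":
    assert _as_signed(arithmetic_right_shift(-17, 2)) == -17 >> 2 == -5
    assert _as_signed(arithmetic_right_shift(17, 2)) == 17 >> 2 == 4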
| 203 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
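# Example invocation (added for illustration; the paths below are placeholders, and
# the script filename assumes the upstream name is kept):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#       --transfo_xl_config_file ./tf_ckpt/config.json
#
# The same conversion can be driven programmatically by calling
# convert_transfo_xl_checkpoint_to_pytorch(...) directly, passing empty strings for
# any of the optional inputs to skip that conversion step.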
| 300 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
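# Behavior sketch (added commentary, not part of the module): _LazyModule defers the
# heavy torch/tf imports registered above until an attribute is first accessed, so
# importing the package stays cheap and framework classes materialize on demand:
#
#   import transformers.models.longformer as longformer_module
#   model_cls = longformer_module.LongformerModel  # the real import happens here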
| 300 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds a small MobileBERT config plus dummy inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
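# Standalone illustration (an addition, not upstream test code) of the ratio-based
# tolerance used above: with activations spanning ~1e0 to ~1e8, an absolute allclose
# would need a huge atol, while an elementwise ratio keeps a single relative bound.
def _within_relative_tolerance(expected, actual, tol=1e-3):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))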
| 32 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 32 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
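# Pattern note (added commentary): each try/except block above probes one optional
# backend and, only when it is importable, registers the matching submodule in
# _import_structure. A plain guard achieves the same effect at call sites:
#
#   from transformers.utils import is_torch_available
#   if is_torch_available():
#       from transformers import Speech2TextForConditionalGeneration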
| 147 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds a small MobileBERT config plus dummy inputs for the model tests below."""
def __init__( self : Optional[Any] , __a : int , __a : str=13 , __a : Tuple=7 , __a : int=True , __a : int=True , __a : Dict=True , __a : str=True , __a : List[str]=99 , __a : Dict=64 , __a : Optional[Any]=32 , __a : List[Any]=5 , __a : Optional[int]=4 , __a : str=37 , __a : str="gelu" , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.1 , __a : int=512 , __a : Optional[Any]=16 , __a : Any=2 , __a : Dict=0.02 , __a : str=3 , __a : List[Any]=4 , __a : List[str]=None , ) ->Optional[int]:
lowerCamelCase_ : Dict = parent
lowerCamelCase_ : Optional[Any] = batch_size
lowerCamelCase_ : Any = seq_length
lowerCamelCase_ : Union[str, Any] = is_training
lowerCamelCase_ : int = use_input_mask
lowerCamelCase_ : int = use_token_type_ids
lowerCamelCase_ : int = use_labels
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Any = hidden_size
lowerCamelCase_ : Any = embedding_size
lowerCamelCase_ : int = num_hidden_layers
lowerCamelCase_ : Union[str, Any] = num_attention_heads
lowerCamelCase_ : Optional[Any] = intermediate_size
lowerCamelCase_ : Optional[int] = hidden_act
lowerCamelCase_ : Dict = hidden_dropout_prob
lowerCamelCase_ : Any = attention_probs_dropout_prob
lowerCamelCase_ : Optional[int] = max_position_embeddings
lowerCamelCase_ : Dict = type_vocab_size
lowerCamelCase_ : List[str] = type_sequence_label_size
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Union[str, Any] = num_labels
lowerCamelCase_ : List[Any] = num_choices
lowerCamelCase_ : Dict = scope
def _lowerCAmelCase ( self : int ) ->str:
lowerCamelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Tuple = None
if self.use_input_mask:
lowerCamelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : List[Any] = None
if self.use_token_type_ids:
lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ : List[Any] = None
lowerCamelCase_ : Tuple = None
lowerCamelCase_ : Optional[int] = None
if self.use_labels:
lowerCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self : Tuple , __a : int , __a : List[str] , __a : Tuple , __a : Any , __a : Union[str, Any] , __a : int , __a : Any ) ->Tuple:
lowerCamelCase_ : Tuple = MobileBertModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Dict = model(__a , attention_mask=__a , token_type_ids=__a )
lowerCamelCase_ : Optional[int] = model(__a , token_type_ids=__a )
lowerCamelCase_ : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCAmelCase ( self : List[str] , __a : str , __a : Union[str, Any] , __a : int , __a : List[Any] , __a : Dict , __a : List[Any] , __a : List[str] ) ->Tuple:
lowerCamelCase_ : List[Any] = MobileBertForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Union[str, Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Tuple , __a : Dict , __a : Tuple , __a : Optional[Any] , __a : Union[str, Any] , __a : Dict ) ->int:
lowerCamelCase_ : Tuple = MobileBertForNextSentencePrediction(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Any = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCAmelCase ( self : str , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Optional[int] , __a : Optional[int] , __a : Optional[int] , __a : Dict ) ->List[Any]:
lowerCamelCase_ : Optional[int] = MobileBertForPreTraining(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : List[Any] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCAmelCase ( self : List[str] , __a : Tuple , __a : int , __a : Optional[Any] , __a : Optional[Any] , __a : Tuple , __a : Optional[Any] , __a : Any ) ->List[str]:
lowerCamelCase_ : Dict = MobileBertForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Tuple = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[Any] , __a : List[Any] , __a : str , __a : int , __a : Dict , __a : Dict , __a : List[Any] , __a : str ) ->Tuple:
lowerCamelCase_ : Dict = self.num_labels
lowerCamelCase_ : Optional[Any] = MobileBertForSequenceClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase_ : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : Optional[int] , __a : int , __a : str , __a : str , __a : Optional[Any] ) ->Tuple:
lowerCamelCase_ : int = self.num_labels
lowerCamelCase_ : List[str] = MobileBertForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Optional[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : Union[str, Any] , __a : Any , __a : Tuple , __a : Dict , __a : Dict , __a : List[Any] , __a : Optional[int] , __a : Optional[Any] ) ->List[str]:
lowerCamelCase_ : Any = self.num_choices
lowerCamelCase_ : int = MobileBertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
lowerCamelCase_ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Tuple = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def _lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : Dict , __a : str=False ) ->Any:
lowerCamelCase_ : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class in get_values(__a ):
lowerCamelCase_ : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a )
lowerCamelCase_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def _lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : str ) ->Any:
lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a )
def _lowerCAmelCase ( self : List[str] ) ->Tuple:
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a )
def _lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a )
def _lowerCAmelCase ( self : Any ) ->List[Any]:
lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a )
def _lowerCAmelCase ( self : Optional[int] ) ->str:
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a )
def _lowerCAmelCase ( self : str ) ->Optional[Any]:
lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a )
def _lowerCAmelCase ( self : List[str] ) ->int:
lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a )
def _lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 278 | 0 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
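# Minimal round-trip sketch (an addition; it mirrors what check_over_configs
# verifies): a scheduler reloaded from its saved config carries the same settings
# and must then step identically to the original.
if __name__ == "__main__":
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    with tempfile.TemporaryDirectory() as tmp:
        scheduler.save_config(tmp)
        reloaded = IPNDMScheduler.from_pretrained(tmp)
    print(reloaded.config.num_train_timesteps)  # -> 1000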
| 710 |
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
lowerCamelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    """Matthews correlation coefficient metric, wrapping `sklearn.metrics.matthews_corrcoef`."""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
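# For intuition (an addition, not part of the metric wrapper): on binary labels the
# coefficient computed above by sklearn reduces to the closed form
# MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
def _binary_mcc(tp, tn, fp, fn):
    denom = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
    return 0.0 if denom == 0 else (tp * tn - fp * fn) / denom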
| 86 | 0 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts MFSC features for a single (unbatched) waveform vector."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into padded log-mel filterbank features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,  # the mask is needed below for per-utterance normalization
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
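# Hedged usage sketch (an addition for illustration): the extractor maps raw
# mono audio to log-mel filterbank features. The defaults above assume 16 kHz
# input; the random waveform below is a stand-in for real audio.
def _example_feature_extraction():
    extractor = MCTCTFeatureExtractor()
    audio = np.random.randn(16000).astype(np.float32)  # one second of fake 16 kHz audio
    inputs = extractor(audio, sampling_rate=16000, padding=True, return_tensors="np")
    return inputs["input_features"].shape  # (batch, num_frames, feature_size)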
| 295 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character count is odd."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations above on the same input."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
    check_str = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 145 | 0 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """An IPv4 address is four dot-separated decimal octets, each in [0, 255]."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    # 255 is a valid octet value (e.g. the broadcast address 255.255.255.255)
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
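# Hedged spot checks (an addition for illustration; each result follows
# directly from the four-octet, 0-255 rule above):
def _example_validations() -> None:
    assert is_ip_v4_address_valid("192.168.0.23") is True
    assert is_ip_v4_address_valid("192.256.15.8") is False  # 256 is out of range
    assert is_ip_v4_address_valid("0.0.0") is False  # only three octets
    assert is_ip_v4_address_valid("1.2.3.4.5") is False  # five octets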
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 189 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    """Encrypt `message` with `key` using the Vigenère cipher."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt `message` with `key` using the Vigenère cipher."""
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Only advance the key for letters; other symbols pass through unchanged
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 189 | 1 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
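# Hedged usage sketch (an addition for illustration; the defaults above appear
# to describe the L1 variant referenced in the archive map, and unknown
# keywords are forwarded to PretrainedConfig via **kwargs):
#
#     config = EfficientFormerConfig()                     # default configuration
#     custom = EfficientFormerConfig(depths=[2, 2, 4, 3])  # hypothetical smaller variant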
| 29 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )
    def to_dict(self):
        """
        Serializes this instance to a dict, converting any `GenerationConfig`
        member to its own dict form so the result is JSON-serializable.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
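# Hedged usage sketch (an addition for illustration; "out" is a hypothetical
# output directory, the other names come from the dataclass above):
#
#     args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_num_beams=4)
#     args.to_dict()  # any GenerationConfig value is flattened to a plain dict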
| 29 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
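# Hedged shell usage (an addition for illustration; the subcommands correspond
# to the parsers registered in main() above, and the flags shown are
# illustrative, not exhaustive):
#
#     accelerate config                      # interactive environment setup
#     accelerate env                         # report the current configuration
#     accelerate launch train.py --some-arg  # run a (hypothetical) training script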
if __name__ == "__main__":
main()
| 707 |
"""simple docstring"""
def kth_permutation(k: int, n: int) -> list:
    """
    Finds the k-th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time, via the factorial number system.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: each factorial-base digit of k picks the next element
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
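# Hedged worked example (an addition for illustration) of the factorial-base
# decoding above: with n = 4 the place values are (3!, 2!, 1!) = (6, 2, 1), and
# 10 = 1*6 + 2*2 + 0*1, so the digits (1, 2, 0) successively index the
# shrinking pool [0, 1, 2, 3] -> [0, 2, 3] -> [0, 2], giving [1, 3, 0, 2].
#
#     assert kth_permutation(10, 4) == [1, 3, 0, 2]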
if __name__ == "__main__":
import doctest
doctest.testmod()
| 498 | 0 |